diff --git a/.config/nextest.toml b/.config/nextest.toml index 381428729ddeff..082fee67c02ee7 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -1,6 +1,9 @@ [store] dir = "target/nextest" +[test-groups] +build-sbf = { max-threads = 1 } + [profile.ci] failure-output = "immediate-final" slow-timeout = { period = "60s", terminate-after = 1 } @@ -25,13 +28,17 @@ threads-required = "num-cpus" filter = "package(solana-gossip) & test(/^test_star_network_push_ring_200/)" threads-required = "num-cpus" +[[profile.ci.overrides]] +filter = "package(solana-gossip) & test(/^gossip_ring/)" +threads-required = "num-cpus" + [[profile.ci.overrides]] filter = "package(solana-gossip) & test(/^cluster_info::tests::new_with_external_ip_test_random/)" threads-required = "num-cpus" [[profile.ci.overrides]] filter = "package(solana-cargo-build-sbf)" -threads-required = "num-cpus" +test-group = "build-sbf" [[profile.ci.overrides]] filter = 'package(solana-local-cluster) & test(/^test_kill_partition_switch_threshold_progress$/)' diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 74c0219a170d37..18221ff8244373 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,16 +1,21 @@ -# The SVM team is in the process of migrating these subdirectories to a new -# repo and would like to avoid introducing dependencies in the meantime. 
+# Please keep this file sorted +/bloom/ @anza-xyz/networking /compute-budget-instruction/ @anza-xyz/fees +/core/src/repair/ @anza-xyz/networking /fee/ @anza-xyz/fees +/gossip/ @anza-xyz/networking /log-collector/ @anza-xyz/svm +/net-utils/ @anza-xyz/networking /program-runtime/ @anza-xyz/svm /programs/bpf_loader/ @anza-xyz/svm /programs/loader-v4/ @anza-xyz/svm /runtime-transaction/ @anza-xyz/tx-metadata +/svm-callback/ @anza-xyz/svm /svm-conformance/ @anza-xyz/svm /svm-transaction/ @anza-xyz/svm /svm/ @anza-xyz/svm -/svm/examples/Cargo.lock -/svm-callback/ @anza-xyz/svm +/tls-utils/ @anza-xyz/networking /transaction-context/ @anza-xyz/svm /transaction-view/ @anza-xyz/tx-metadata +/turbine/ @anza-xyz/networking +/xdp/ @anza-xyz/networking diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 95e3fb34445ee0..d4a244f1711830 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,12 +5,17 @@ version: 2 updates: -- package-ecosystem: cargo - directory: "/" - schedule: - interval: daily - time: "01:00" - timezone: America/Los_Angeles - #labels: - # - "automerge" - open-pull-requests-limit: 6 + - package-ecosystem: cargo + directory: "/" + schedule: + interval: daily + time: "01:00" + timezone: America/Los_Angeles + open-pull-requests-limit: 6 + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: daily + time: "01:00" + timezone: America/Los_Angeles + open-pull-requests-limit: 3 diff --git a/.github/label-actions.yml b/.github/label-actions.yml index 029ec96a2dc9fc..8a67d20c3de3ce 100644 --- a/.github/label-actions.yml +++ b/.github/label-actions.yml @@ -16,11 +16,11 @@ question: this is a bug with Solana itself, please post your question to the Solana Stack Exchange using this link: https://solana.stackexchange.com/questions/ask - + --- _This - [automated message](https://github.com/solana-labs/solana/blob/master/.github/label-actions.yml) + [automated 
message](https://github.com/anza-xyz/agave/blob/master/.github/label-actions.yml) is a result of having added the ‘question’ tag_. # Close the issue diff --git a/.github/workflows/add-team-to-ghsa.yml b/.github/workflows/add-team-to-ghsa.yml index 5e5f2f70881050..3a66048b732103 100644 --- a/.github/workflows/add-team-to-ghsa.yml +++ b/.github/workflows/add-team-to-ghsa.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: ref: master - name: Run script diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 2cf9a5895ff347..0078e9c6788874 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -69,7 +69,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Before Command if: ${{ matrix.test.before_command != '' }} diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml index 2a8b94ad2dd376..c4b3a7b644125a 100644 --- a/.github/workflows/cargo.yml +++ b/.github/workflows/cargo.yml @@ -54,7 +54,7 @@ jobs: apk update apk add bash git - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: mozilla-actions/sccache-action@v0.0.9 with: diff --git a/.github/workflows/changelog-label.yml b/.github/workflows/changelog-label.yml index ffd8ec21033ef8..30c77cea8a2a13 100644 --- a/.github/workflows/changelog-label.yml +++ b/.github/workflows/changelog-label.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - name: Check if changes to CHANGELOG.md diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml index 1b875fed808a9d..a3ef05860a21f2 100644 --- a/.github/workflows/client-targets.yml +++ b/.github/workflows/client-targets.yml @@ -31,7 +31,7 @@ jobs: - armv7-linux-androideabi runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v4 + - uses: 
actions/checkout@v5 # This can be removed once cargo-ndk >= 3.5.4 is used. - name: Setup environment for Android NDK @@ -61,7 +61,7 @@ jobs: - x86_64-apple-darwin runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Setup Rust run: | diff --git a/.github/workflows/crate-check.yml b/.github/workflows/crate-check.yml index c8bbcf79a26496..8ed80637fd7617 100644 --- a/.github/workflows/crate-check.yml +++ b/.github/workflows/crate-check.yml @@ -18,7 +18,7 @@ jobs: if: github.repository == 'anza-xyz/agave' runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 diff --git a/.github/workflows/dependabot-pr.yml b/.github/workflows/dependabot-pr.yml index 70f7e939e35f25..1b06354332045e 100644 --- a/.github/workflows/dependabot-pr.yml +++ b/.github/workflows/dependabot-pr.yml @@ -12,7 +12,7 @@ jobs: if: github.triggering_actor == 'dependabot[bot]' steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: ref: ${{ github.event.pull_request.head.ref }} token: ${{ secrets.PAT }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index f6bc0049614d05..26e456a8c7b45b 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 @@ -77,12 +77,12 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Setup Node - uses: actions/setup-node@v4 + uses: actions/setup-node@v5 with: - node-version: 22 + node-version: 24 - name: Build working-directory: docs diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml index 08afd74097e035..c50ae9d4c3d75a 100644 --- a/.github/workflows/downstream-project-anchor.yml +++ b/.github/workflows/downstream-project-anchor.yml @@ -45,7 
+45,7 @@ jobs: matrix: version: ["master"] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - shell: bash run: | diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index 3d4887cf817804..a9e3c667356ca6 100644 --- a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -38,7 +38,7 @@ env: jobs: check: - if: github.repository == 'anza-xyz/agave' + #if: github.repository == 'anza-xyz/agave' if: false runs-on: ubuntu-latest timeout-minutes: 60 @@ -57,7 +57,7 @@ jobs: # re-enable with https://github.com/buffalojoec/mollusk/pull/74 # - token steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - shell: bash run: | @@ -77,7 +77,7 @@ jobs: cargo check test_cli: - if: github.repository == 'anza-xyz/agave' + #if: github.repository == 'anza-xyz/agave' if: false runs-on: ubuntu-latest timeout-minutes: 60 @@ -87,7 +87,7 @@ jobs: - single-pool - token-2022 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - shell: bash run: | @@ -108,7 +108,7 @@ jobs: cargo test --manifest-path clients/cli/Cargo.toml cargo-test-sbf: - if: github.repository == 'anza-xyz/agave' + #if: github.repository == 'anza-xyz/agave' if: false runs-on: ubuntu-latest timeout-minutes: 60 @@ -127,7 +127,7 @@ jobs: # re-enable with https://github.com/buffalojoec/mollusk/pull/74 # - token steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - shell: bash run: | diff --git a/.github/workflows/label-actions.yml b/.github/workflows/label-actions.yml new file mode 100644 index 00000000000000..7ccac26d559641 --- /dev/null +++ b/.github/workflows/label-actions.yml @@ -0,0 +1,15 @@ +name: "Issue Label Actions" + +on: + issues: + types: [labeled, unlabeled] + +permissions: + contents: read + issues: write + +jobs: + action: + runs-on: ubuntu-latest + steps: + - uses: dessant/label-actions@v4 diff --git a/.github/workflows/publish-windows-tarball.yml 
b/.github/workflows/publish-windows-tarball.yml index 42d93bee978f1a..6b92e1abb58d33 100644 --- a/.github/workflows/publish-windows-tarball.yml +++ b/.github/workflows/publish-windows-tarball.yml @@ -16,7 +16,7 @@ jobs: channel: ${{ steps.build.outputs.channel }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: ref: master fetch-depth: 0 @@ -85,7 +85,7 @@ jobs: path: ./windows-release - name: Setup crediential - uses: "google-github-actions/auth@v2" + uses: "google-github-actions/auth@v3" with: credentials_json: "${{ secrets.GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL }}" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2a2cae1184d30c..18e6f2767f339f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Trigger a Buildkite Build - uses: "buildkite/trigger-pipeline-action@v2.2.0" + uses: "buildkite/trigger-pipeline-action@v2.3.0" with: buildkite_api_access_token: ${{ secrets.TRIGGER_BK_BUILD_TOKEN }} pipeline: "anza/agave-secondary" @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Create Release - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | @@ -44,7 +44,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 token: ${{ secrets.VERSION_BUMP_PAT }} @@ -80,7 +80,7 @@ jobs: git push origin version-bump-$next_version - name: Create PR - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/.github/workflows/svm-examples.yml b/.github/workflows/svm-examples.yml deleted file mode 100644 index d31007e53b42f8..00000000000000 --- a/.github/workflows/svm-examples.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: SVM examples test - -on: - push: - branches: - - master - - v[0-9]+.[0-9]+ 
- pull_request: - branches: - - master - - v[0-9]+.[0-9]+ - paths: - - "**.rs" - - "Cargo.toml" - - "Cargo.lock" - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -env: - SHELL: /bin/bash - SCCACHE_GHA_ENABLED: "true" - RUSTC_WRAPPER: "sccache" - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - shell: bash - run: | - .github/scripts/purge-ubuntu-runner.sh - - - uses: mozilla-actions/sccache-action@v0.0.9 - with: - version: "v0.10.0" - - - shell: bash - run: | - source .github/scripts/downstream-project-spl-install-deps.sh - - - name: Run build - run: | - cd svm/examples - cargo build - - - name: Run tests - run: | - cd svm/examples - cargo test \ No newline at end of file diff --git a/.github/workflows/verify-packets.yml b/.github/workflows/verify-packets.yml index 56e0d49620adf7..22a62c24d63635 100644 --- a/.github/workflows/verify-packets.yml +++ b/.github/workflows/verify-packets.yml @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Install required packages run: | diff --git a/.mergify.yml b/.mergify.yml index eaea8b75178b41..0c1c5ab8e7b4ea 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -104,9 +104,9 @@ pull_request_rules: - automerge comment: message: automerge label removed due to a CI failure - - name: v2.2 feature-gate backport + - name: v2.3 feature-gate backport conditions: - - label=v2.2 + - label=v2.3 - label=feature-gate actions: backport: @@ -117,10 +117,10 @@ pull_request_rules: labels: - feature-gate branches: - - v2.2 - - name: v2.2 non-feature-gate backport + - v2.3 + - name: v2.3 non-feature-gate backport conditions: - - label=v2.2 + - label=v2.3 - label!=feature-gate actions: backport: @@ -128,10 +128,10 @@ pull_request_rules: title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})" ignore_conflicts: true branches: - - v2.2 - - 
name: v2.2 backport warning comment + - v2.3 + - name: v2.3 backport warning comment conditions: - - label=v2.2 + - label=v2.3 actions: comment: message: > @@ -142,9 +142,9 @@ pull_request_rules: refactoring, plumbing, cleanup, etc that are not strictly necessary to achieve the goal. Any of the latter should go only into master and ride the normal stabilization schedule. - - name: v2.3 feature-gate backport + - name: v3.0 feature-gate backport conditions: - - label=v2.3 + - label=v3.0 - label=feature-gate actions: backport: @@ -154,10 +154,10 @@ pull_request_rules: labels: - feature-gate branches: - - v2.3 - - name: v2.3 non-feature-gate backport + - v3.0 + - name: v3.0 non-feature-gate backport conditions: - - label=v2.3 + - label=v3.0 - label!=feature-gate actions: backport: @@ -165,10 +165,10 @@ pull_request_rules: title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})" ignore_conflicts: true branches: - - v2.3 - - name: v2.3 backport warning comment + - v3.0 + - name: v3.0 backport warning comment conditions: - - label=v2.3 + - label=v3.0 actions: comment: message: > @@ -217,20 +217,16 @@ pull_request_rules: If this PR represents a change to a native program implementation (not tests), please include a reviewer from the Firedancer team. And please keep refactors to a minimum. - - name: Notify about future move of zk-keygen, zk-sdk, and zk-token-sdk + - name: Notify about the deprecation of zk-token-sdk conditions: - or: - - files~=^zk-keygen/ - - files~=^zk-sdk/ - files~=^zk-token-sdk/ actions: comment: message: | - For your information, the `zk-keygen` and `zk-sdk` directories are - scheduled to be relocated to `solana-program/zk-elgamal-proof` in a - separate repository. Additionally, the `zk-token-sdk` directory will - be removed. Please take these upcoming changes into account when - making modifications. + For your information, the `solana-zk-token-sdk` is deprecated, and this + directory will be removed in a future version. 
Please take this in mind + when making modifications. commands_restrictions: # The author of copied PRs is the Mergify user. diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ccdbc4a78c3b2..ed790de50154d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,15 +5,24 @@ All notable changes to this project will be documented in this file. Please follow the [guidance](#adding-to-this-changelog) at the bottom of this file when making changes The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) -and follows a [Backwards Compatibility Policy](https://docs.solanalabs.com/backwards-compatibility) +and follows a [Backwards Compatibility Policy](https://docs.anza.xyz/backwards-compatibility) Release channels have their own copy of this changelog: -* [edge - v3.0](#edge-channel) -* [beta - v2.3](https://github.com/anza-xyz/agave/blob/v2.3/CHANGELOG.md) -* [stable - v2.2](https://github.com/anza-xyz/agave/blob/v2.2/CHANGELOG.md) +* [edge - v3.1](#edge-channel) +* [beta - v3.0](https://github.com/anza-xyz/agave/blob/v3.0/CHANGELOG.md) +* [stable - v2.3](https://github.com/anza-xyz/agave/blob/v2.3/CHANGELOG.md) -## 3.0.0 - Unreleased +## 3.1.0—Unreleased +### RPC +#### Breaking +#### Changes +### Validator +#### Breaking +#### Deprecations +* The `--monitor` flag with `agave-validator exit` is now deprecated. Operators can use the `monitor` command after `exit` instead. + +## 3.0.0 ### RPC @@ -40,10 +49,14 @@ Release channels have their own copy of this changelog: * `--skip-poh-verify` * Deprecated snapshot archive formats have been removed and are no longer loadable. * Using `--snapshot-interval-slots 0` to disable generating snapshots has been removed. Use `--no-snapshots` instead. +* Validator will now bind all ports within provided `--dynamic-port-range`, including the client ports. A range of at least 25 ports is recommended to avoid failures to bind during startup. 
+* Agave and agave-ledger-tool can no longer operate with legacy shreds. Legacy shreds have not been in circulation since the activation of https://explorer.solana.com/address/GV49KKQdBNaiv2pgqhS2Dy3GWYJGXMTVYbYkdk91orRy. This change may break operations with old ledgers that may still contain legacy shreds. #### Changes * `--transaction-structure view` is now the default. * The default full snapshot interval is now 100,000 slots. +* `SOLANA_BANKING_THREADS` environment variable is no longer supported. Use `--block-prouduction-num-workers` instead. +* By default, `agave-validator exit` will now wait for the validator process to terminate before returning. The `--wait-for-exit` flag has been deprecated, but operators can still opt out with the new `--no-wait-for-exit` flag. ## 2.3.0 diff --git a/Cargo.lock b/Cargo.lock index 09b1776bc16672..a9a5ae57c1d373 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -43,7 +43,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "cipher", "cpufeatures", ] @@ -69,7 +69,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "763e484feceb7dd021b21c5c6f81aee06b1594a743455ec7efbf72e6355e447b" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "errno", "libc", "num_cpus", @@ -77,7 +77,7 @@ dependencies = [ [[package]] name = "agave-banking-stage-ingress-types" -version = "3.0.0" +version = "3.1.0" dependencies = [ "crossbeam-channel", "solana-perf", @@ -85,7 +85,7 @@ dependencies = [ [[package]] name = "agave-cargo-registry" -version = "3.0.0" +version = "3.1.0" dependencies = [ "clap 2.33.3", "flate2", @@ -117,7 +117,7 @@ dependencies = [ [[package]] name = "agave-feature-set" -version = "3.0.0" +version = "3.1.0" dependencies = [ "ahash 0.8.11", "solana-epoch-schedule", @@ -131,7 +131,7 @@ dependencies = [ [[package]] name = 
"agave-geyser-plugin-interface" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "solana-clock", @@ -139,34 +139,33 @@ dependencies = [ "solana-signature", "solana-transaction", "solana-transaction-status", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "agave-install" -version = "3.0.0" +version = "3.1.0" dependencies = [ "atty", "bincode", "bzip2", "chrono", "clap 2.33.3", - "console 0.16.0", + "console 0.16.1", "crossbeam-channel", "ctrlc", "dirs-next", "indicatif 0.18.0", "nix", - "reqwest 0.12.22", + "reqwest 0.12.23", "scopeguard", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_derive", "serde_yaml 0.8.26", "serde_yaml 0.9.34+deprecated", "solana-clap-utils", "solana-config-interface", - "solana-config-program-client", "solana-hash", "solana-keypair", "solana-logger", @@ -180,14 +179,14 @@ dependencies = [ "solana-version", "tar", "tempfile", - "url 2.5.4", + "url 2.5.7", "winapi 0.3.9", "winreg", ] [[package]] name = "agave-io-uring" -version = "3.0.0" +version = "3.1.0" dependencies = [ "io-uring", "libc", @@ -198,7 +197,7 @@ dependencies = [ [[package]] name = "agave-ledger-tool" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-reserved-account-keys", @@ -230,6 +229,7 @@ dependencies = [ "solana-clap-utils", "solana-cli-output", "solana-clock", + "solana-cluster-type", "solana-compute-budget", "solana-core", "solana-cost-model", @@ -244,7 +244,6 @@ dependencies = [ "solana-keypair", "solana-ledger", "solana-loader-v3-interface", - "solana-log-collector", "solana-logger", "solana-measure", "solana-message", @@ -265,29 +264,34 @@ dependencies = [ "solana-streamer", "solana-svm-callback", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-type-overrides", "solana-system-interface", "solana-transaction", "solana-transaction-context", "solana-transaction-status", - "solana-type-overrides", "solana-unified-scheduler-pool", "solana-version", "solana-vote", 
"solana-vote-program", - "thiserror 2.0.12", + "thiserror 2.0.16", "tikv-jemallocator", "tokio", ] +[[package]] +name = "agave-low-pass-filter" +version = "3.1.0" + [[package]] name = "agave-precompiles" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bincode", "bytemuck", "digest 0.10.7", - "ed25519-dalek", + "ed25519-dalek 1.0.1", "hex", "libsecp256k1", "openssl", @@ -307,7 +311,7 @@ dependencies = [ [[package]] name = "agave-reserved-account-keys" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "solana-frozen-abi", @@ -318,9 +322,13 @@ dependencies = [ "solana-sysvar", ] +[[package]] +name = "agave-scheduler-bindings" +version = "3.1.0" + [[package]] name = "agave-store-histogram" -version = "3.0.0" +version = "3.1.0" dependencies = [ "clap 2.33.3", "solana-version", @@ -328,7 +336,7 @@ dependencies = [ [[package]] name = "agave-store-tool" -version = "3.0.0" +version = "3.1.0" dependencies = [ "ahash 0.8.11", "clap 2.33.3", @@ -342,7 +350,7 @@ dependencies = [ [[package]] name = "agave-syscalls" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "bincode", @@ -355,7 +363,7 @@ dependencies = [ "solana-bn254", "solana-clock", "solana-cpi", - "solana-curve25519 3.0.0", + "solana-curve25519 3.1.0", "solana-epoch-rewards", "solana-epoch-schedule", "solana-fee-calculator", @@ -364,8 +372,6 @@ dependencies = [ "solana-keccak-hasher", "solana-last-restart-slot", "solana-loader-v3-interface", - "solana-log-collector", - "solana-measure", "solana-poseidon", "solana-program", "solana-program-entrypoint", @@ -378,27 +384,31 @@ dependencies = [ "solana-sha256-hasher", "solana-slot-hashes", "solana-stable-layout", + "solana-stake-interface", "solana-svm-callback", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-timings", + "solana-svm-type-overrides", "solana-sysvar", "solana-sysvar-id", - "solana-timings", "solana-transaction-context", - 
"solana-type-overrides", + "static_assertions", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "agave-thread-manager" -version = "3.0.0" +version = "3.1.0" dependencies = [ "affinity", "agave-thread-manager", "anyhow", "axum 0.7.9", - "cfg-if 1.0.1", - "env_logger 0.11.8", + "cfg-if 1.0.3", + "env_logger", "hyper 0.14.32", "log", "num_cpus", @@ -414,7 +424,7 @@ dependencies = [ [[package]] name = "agave-transaction-view" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-transaction-view", "bincode", @@ -436,13 +446,13 @@ dependencies = [ [[package]] name = "agave-validator" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-geyser-plugin-interface", "assert_cmd", "chrono", "clap 2.33.3", - "console 0.16.0", + "console 0.16.1", "core_affinity", "crossbeam-channel", "fd-lock", @@ -458,8 +468,10 @@ dependencies = [ "num_cpus", "predicates", "pretty_assertions", + "qualifier_attr", "rand 0.8.5", "rayon", + "scopeguard", "serde", "serde_json", "serde_yaml 0.9.34+deprecated", @@ -520,23 +532,69 @@ dependencies = [ "symlink", "tempfile", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", "tikv-jemallocator", "tokio", ] [[package]] name = "agave-verified-packet-receiver" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "solana-perf", "solana-streamer", ] +[[package]] +name = "agave-votor" +version = "3.1.0" +dependencies = [ + "anyhow", + "bincode", + "bitvec", + "bs58", + "crossbeam-channel", + "dashmap", + "itertools 0.12.1", + "log", + "parking_lot 0.12.3", + "qualifier_attr", + "rayon", + "serde", + "serde_bytes", + "serde_derive", + "solana-accounts-db", + "solana-bloom", + "solana-bls-signatures", + "solana-clock", + "solana-entry", + "solana-epoch-schedule", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-gossip", + "solana-hash", + "solana-keypair", + "solana-ledger", + "solana-logger", + "solana-measure", + "solana-metrics", + "solana-pubkey", + "solana-rpc", + 
"solana-runtime", + "solana-signature", + "solana-signer", + "solana-signer-store", + "solana-time-utils", + "solana-transaction", + "solana-votor-messages", + "test-case", + "thiserror 2.0.16", +] + [[package]] name = "agave-watchtower" -version = "3.0.0" +version = "3.1.0" dependencies = [ "clap 2.33.3", "humantime", @@ -557,14 +615,14 @@ dependencies = [ [[package]] name = "agave-xdp" -version = "3.0.0" +version = "3.1.0" dependencies = [ "aya", "caps", "crossbeam-channel", "libc", "log", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] @@ -584,7 +642,7 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "getrandom 0.2.15", "once_cell", "version_check", @@ -630,12 +688,6 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.4" @@ -711,9 +763,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "aquamarine" @@ -726,14 +778,14 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] name = "arbitrary" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" +checksum = 
"c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" dependencies = [ "derive_arbitrary", ] @@ -944,17 +996,6 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdc70193dadb9d7287fa4b633f15f90c876915b31f6af17da307fc59c9859a8" -[[package]] -name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.2", - "futures-core", -] - [[package]] name = "async-compression" version = "0.4.1" @@ -975,7 +1016,7 @@ version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" dependencies = [ - "event-listener 5.3.1", + "event-listener", "event-listener-strategy", "pin-project-lite", ] @@ -1003,13 +1044,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -1056,7 +1097,7 @@ dependencies = [ "matchit", "memchr", "mime", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project-lite", "rustversion", "serde", @@ -1085,7 +1126,7 @@ dependencies = [ "matchit", "memchr", "mime", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project-lite", "rustversion", "serde", @@ -1146,7 +1187,7 @@ checksum = "d18bc4e506fbb85ab7392ed993a7db4d1a452c71b75a246af4a80ab8c9d2dd50" dependencies = [ "assert_matches", "aya-obj", - "bitflags 2.9.1", + "bitflags 2.9.4", "bytes", "libc", "log", @@ -1191,13 +1232,19 @@ checksum = 
"4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.1", + "cfg-if 1.0.3", "libc", "miniz_oxide", "object 0.31.1", "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.12.3" @@ -1222,6 +1269,12 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64ct" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" + [[package]] name = "bencher" version = "0.1.5" @@ -1243,7 +1296,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "cexpr", "clang-sys", "itertools 0.12.1", @@ -1254,7 +1307,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -1280,9 +1333,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.1" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" dependencies = [ "serde", ] @@ -1304,6 +1357,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", + "serde", "tap", "wyz", ] @@ -1317,7 +1371,7 @@ dependencies = [ "arrayref", "arrayvec", "cc", - "cfg-if 1.0.1", + "cfg-if 1.0.3", "constant_time_eq", "digest 
0.10.7", ] @@ -1362,36 +1416,41 @@ dependencies = [ ] [[package]] -name = "borsh" -version = "0.10.3" +name = "blst" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" +checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" dependencies = [ - "borsh-derive 0.10.3", - "hashbrown 0.12.3", + "cc", + "glob", + "threadpool", + "zeroize", ] [[package]] -name = "borsh" -version = "1.5.7" +name = "blstrs" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" dependencies = [ - "borsh-derive 1.5.7", - "cfg_aliases", + "blst", + "byte-slice-cast", + "ff", + "group", + "pairing", + "rand_core 0.6.4", + "serde", + "subtle", ] [[package]] -name = "borsh-derive" -version = "0.10.3" +name = "borsh" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate 0.1.5", - "proc-macro2", - "syn 1.0.109", + "borsh-derive", + "cfg_aliases", ] [[package]] @@ -1404,29 +1463,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.104", -] - -[[package]] -name = "borsh-derive-internal" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "borsh-schema-derive-internal" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "syn 2.0.106", ] [[package]] @@ -1503,39 +1540,35 @@ dependencies = [ ] [[package]] -name = "byte-tools" -version = "0.3.1" +name = "byte-slice-cast" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" [[package]] -name = "byte-unit" -version = "4.0.19" +name = "byte-tools" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da78b32057b8fdfc352504708feeba7216dcd65a2c9ab02978cbd288d1279b6c" -dependencies = [ - "serde", - "utf8-width", -] +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.23.1" +version = "1.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" +checksum = "4f154e572231cb6ba2bd1176980827e3d5dc04cc183a75dea38109fbdd672d29" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -1625,7 +1658,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" dependencies = [ "camino", "cargo-platform", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_json", "thiserror 1.0.69", @@ -1680,9 +1713,9 @@ checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "cfg-if" -version = "1.0.1" +version = "1.0.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" [[package]] name = "cfg_aliases" @@ -1698,16 +1731,15 @@ checksum = "45565fc9416b9896014f5732ac776f810ee53a66730c17e4020c3ec064a8f88f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", @@ -1848,7 +1880,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -1925,36 +1957,22 @@ dependencies = [ [[package]] name = "console" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d" +checksum = "b430743a6eb14e9764d4260d4c0d8123087d504eeb9c48f2b2a5e810dd369df4" dependencies = [ "encode_unicode", "libc", "once_cell", "unicode-width 0.2.0", - "windows-sys 0.60.2", -] - -[[package]] -name = "console_error_panic_hook" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" -dependencies = [ - "cfg-if 1.0.1", - "wasm-bindgen", + "windows-sys 0.61.0", ] [[package]] -name = "console_log" -version = "0.2.2" +name = "const-oid" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89f72f65e8501878b8a004d5a1afb780987e2ce2b4532c562e367a72c57499f" -dependencies = [ - "log", - "web-sys", -] +checksum = 
"c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_format" @@ -2059,7 +2077,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", ] [[package]] @@ -2126,7 +2144,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "crossbeam-epoch", "crossbeam-utils", ] @@ -2136,7 +2154,7 @@ name = "crossbeam-epoch" version = "0.9.5" source = "git+https://github.com/anza-xyz/crossbeam?rev=fd279d707025f0e60951e429bf778b4813d1b6bf#fd279d707025f0e60951e429bf778b4813d1b6bf" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "crossbeam-utils", "lazy_static", "memoffset 0.6.4", @@ -2149,7 +2167,7 @@ version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", ] [[package]] @@ -2158,6 +2176,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array 0.14.7", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -2211,12 +2241,13 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.7" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f93780a459b7d656ef7f071fe699c4d3d2cb201c4b24d085b6ddc505276e73" +checksum = 
"881c5d0a13b2f1498e2306e82cbada78390e152d4b1378fb28a84f4dcd0dc4f3" dependencies = [ + "dispatch", "nix", - "windows-sys 0.59.0", + "windows-sys 0.61.0", ] [[package]] @@ -2238,7 +2269,7 @@ version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", @@ -2258,14 +2289,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] name = "darling" -version = "0.20.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ "darling_core", "darling_macro", @@ -2273,27 +2304,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 2.0.104", + "strsim 0.11.1", + "syn 2.0.106", ] [[package]] name = "darling_macro" -version = "0.20.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -2302,7 +2333,7 @@ version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "hashbrown 0.14.3", "lock_api", "once_cell", @@ -2317,6 +2348,16 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "der-parser" version = "8.1.0" @@ -2361,13 +2402,13 @@ dependencies = [ [[package]] name = "derive-where" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" +checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -2378,7 +2419,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -2412,7 +2453,7 @@ dependencies = [ "convert_case 0.6.0", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", "unicode-xid", ] @@ -2465,6 +2506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid", "crypto-common", "subtle", ] @@ -2484,7 +2526,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "dirs-sys-next", ] @@ -2499,6 +2541,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "dispatch" +version = 
"0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b" + [[package]] name = "displaydoc" version = "0.2.3" @@ -2530,7 +2578,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -2557,13 +2605,37 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abe71d579d1812060163dff96056261deb5bf6729b100fa2e36a68b9649ba3d3" +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "signature 2.2.0", + "spki", +] + [[package]] name = "ed25519" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" dependencies = [ - "signature", + "signature 1.4.0", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature 2.2.0", ] [[package]] @@ -2573,21 +2645,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek 3.2.0", - "ed25519", + "ed25519 1.2.0", "rand 0.7.3", "serde", "sha2 0.9.9", "zeroize", ] +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek 4.1.3", + "ed25519 2.2.3", + "rand_core 0.6.4", + "serde", + "sha2 
0.10.9", + "subtle", + "zeroize", +] + [[package]] name = "ed25519-dalek-bip32" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d2be62a4061b872c8c0873ee4fc6f101ce7b889d039f019c5fa2af471a59908" +checksum = "6b49a684b133c4980d7ee783936af771516011c8cd15f429dbda77245e282f03" dependencies = [ "derivation-path", - "ed25519-dalek", + "ed25519-dalek 2.2.0", "hmac 0.12.1", "sha2 0.10.9", ] @@ -2610,6 +2697,25 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array 0.14.7", + "group", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encode_unicode" version = "1.0.0" @@ -2622,7 +2728,7 @@ version = "0.8.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a74ea89a0a1b98f6332de42c95baff457ada66d1cb4030f9ff151b2041a1c746" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", ] [[package]] @@ -2642,7 +2748,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -2668,19 +2774,6 @@ dependencies = [ "regex", ] -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.11.8" @@ -2710,28 +2803,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = 
"etcd-client" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4b0ea5ef6dc2388a4b1669fa32097249bc03a15417b97cb75e38afb309e4a89" -dependencies = [ - "http 0.2.12", - "prost", - "tokio", - "tokio-stream", - "tonic", - "tonic-build", - "tower 0.4.13", - "tower-service", -] - -[[package]] -name = "event-listener" -version = "2.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" - [[package]] name = "event-listener" version = "5.3.1" @@ -2749,7 +2820,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 5.3.1", + "event-listener", "pin-project-lite", ] @@ -2770,14 +2841,14 @@ dependencies = [ [[package]] name = "fastbloom" -version = "0.9.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27cea6e7f512d43b098939ff4d5a5d6fe3db07971e1d05176fe26c642d33f5b8" +checksum = "18c1ddb9231d8554c2d6bdf4cfaabf0c59251658c68b6c95cd52dd0c513a912a" dependencies = [ "getrandom 0.3.3", + "libm", "rand 0.9.0", "siphasher 1.0.1", - "wide", ] [[package]] @@ -2792,7 +2863,7 @@ version = "3.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "rustix 0.38.39", "windows-sys 0.48.0", ] @@ -2803,6 +2874,17 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da" +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "bitvec", + "rand_core 0.6.4", + 
"subtle", +] + [[package]] name = "fiat-crypto" version = "0.2.9" @@ -2826,7 +2908,7 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "975ccf83d8d9d0d84682850a38c8169027be83368805971cc4f238c2b245bc98" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "libc", "redox_syscall 0.2.10", "winapi 0.3.9", @@ -2910,11 +2992,11 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", ] [[package]] @@ -3004,7 +3086,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -3061,7 +3143,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32c95766e0414f8bfc1d07055574c621b67739466d6ba516c4fef8e99d30d2e6" dependencies = [ "bitflags 1.3.2", - "cfg-if 1.0.1", + "cfg-if 1.0.3", "log", "managed", "num-traits", @@ -3070,7 +3152,7 @@ dependencies = [ [[package]] name = "gen-headers" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "regex", @@ -3078,7 +3160,7 @@ dependencies = [ [[package]] name = "gen-syscall-list" -version = "3.0.0" +version = "3.1.0" dependencies = [ "regex", ] @@ -3090,7 +3172,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "186014d53bc231d0090ef8d6f03e0920c54d85a5ed22f4f2f74315ec56cf83fb" dependencies = [ "cc", - "cfg-if 1.0.1", + "cfg-if 1.0.3", "libc", "log", "rustversion", @@ -3114,6 +3196,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] 
[[package]] @@ -3132,7 +3215,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", @@ -3145,7 +3228,7 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -3158,7 +3241,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "js-sys", "libc", "r-efi", @@ -3216,7 +3299,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "dashmap", "futures 0.3.31", "futures-timer", @@ -3230,6 +3313,19 @@ dependencies = [ "spinning_top", ] +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand 0.8.5", + "rand_core 0.6.4", + "rand_xorshift 0.3.0", + "subtle", +] + [[package]] name = "h2" version = "0.3.26" @@ -3242,7 +3338,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.10.0", + "indexmap 2.11.4", "slab", "tokio", "tokio-util 0.7.16", @@ -3374,7 +3470,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03b876ecf37e86b359573c16c8366bc3eba52b689884a0fc42ba3f67203d2a8b" dependencies = [ "cc", - "cfg-if 1.0.1", + "cfg-if 1.0.3", "libc", "pkg-config", "windows-sys 0.48.0", @@ -3486,9 +3582,9 @@ 
checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "humantime" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hxdmp" @@ -3582,7 +3678,8 @@ dependencies = [ "http 1.1.0", "hyper 1.6.0", "hyper-util", - "rustls 0.23.31", + "rustls 0.23.32", + "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", @@ -3631,7 +3728,7 @@ dependencies = [ "hyper 1.6.0", "ipnet", "libc", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project-lite", "socket2 0.5.10", "tokio", @@ -3767,7 +3864,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -3789,9 +3886,9 @@ dependencies = [ [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -3861,9 +3958,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", "hashbrown 0.15.1", @@ -3876,7 +3973,7 @@ version = "0.17.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4adb2ee6ad319a912210a36e56e3623555817bcc877a7e6e8802d1d69c4d8056" dependencies = [ - "console 0.16.0", + "console 0.16.1", "portable-atomic", "unicode-width 0.2.0", 
"unit-prefix", @@ -3889,7 +3986,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70a646d946d06bedbbc4cac4c218acf4bbf2d87757a784857025f4d447e4e1cd" dependencies = [ - "console 0.16.0", + "console 0.16.1", "portable-atomic", "unicode-width 0.2.0", "unit-prefix", @@ -3911,17 +4008,17 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", ] [[package]] name = "io-uring" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.1", + "bitflags 2.9.4", + "cfg-if 1.0.3", "libc", ] @@ -4003,7 +4100,7 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -4013,7 +4110,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" dependencies = [ "cesu8", - "cfg-if 1.0.1", + "cfg-if 1.0.3", "combine 4.6.7", "jni-sys", "log", @@ -4039,9 +4136,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "852f13bec5eba4ba9afbeb93fd7c13fe56147f055939ae21c43a29a0ecb2702e" dependencies = [ "once_cell", "wasm-bindgen", @@ -4179,13 +4276,17 @@ dependencies = [ ] [[package]] -name = "kaigan" -version = "0.2.6" +name = "k256" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2ba15de5aeb137f0f65aa3bf82187647f1285abfe5b20c80c2c37f7007ad519a" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ - "borsh 0.10.3", - "serde", + "cfg-if 1.0.3", + "ecdsa", + "elliptic-curve", + "once_cell", + "sha2 0.10.9", + "signature 2.2.0", ] [[package]] @@ -4230,9 +4331,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.174" +version = "0.2.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" [[package]] name = "libloading" @@ -4240,7 +4341,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "winapi 0.3.9", ] @@ -4371,9 +4472,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "lru" @@ -4450,9 +4551,9 @@ dependencies = [ [[package]] name = "memmap2" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "483758ad303d734cec05e5c12b41d7e93e6a6390c5e9dae6bdeb7c1259012d28" +checksum = "843a98750cd611cc2965a8213b53b43e715f13c37a9e096c6408e69990961db7" dependencies = [ "libc", ] @@ -4531,7 +4632,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "downcast", "fragile", "lazy_static", @@ -4546,7 +4647,7 @@ version = 
"0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "proc-macro2", "quote", "syn 1.0.109", @@ -4614,8 +4715,8 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.1", + "bitflags 2.9.4", + "cfg-if 1.0.3", "cfg_aliases", "libc", "memoffset 0.9.1", @@ -4703,7 +4804,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -4776,7 +4877,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -4805,7 +4906,7 @@ checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "crc32fast", "hashbrown 0.15.1", - "indexmap 2.10.0", + "indexmap 2.11.4", "memchr", ] @@ -4848,8 +4949,8 @@ version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.1", + "bitflags 2.9.4", + "cfg-if 1.0.3", "foreign-types", "libc", "once_cell", @@ -4865,7 +4966,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -4909,7 +5010,7 @@ dependencies = [ "futures-util", "js-sys", "lazy_static", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project", "rand 0.8.5", "thiserror 1.0.69", @@ -4927,6 +5028,15 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" +[[package]] +name = "pairing" 
+version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group", +] + [[package]] name = "parity-tokio-ipc" version = "0.9.0" @@ -4974,7 +5084,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "instant", "libc", "redox_syscall 0.2.10", @@ -4988,7 +5098,7 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "libc", "redox_syscall 0.3.5", "smallvec", @@ -5047,9 +5157,9 @@ checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "percentage" @@ -5155,6 +5265,16 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + [[package]] name = "pkg-config" version = "0.3.22" @@ -5195,7 +5315,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "cpufeatures", "opaque-debug 0.3.0", 
"universal-hash", @@ -5361,18 +5481,18 @@ dependencies = [ [[package]] name = "proptest" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f" +checksum = "2bb0be07becd10686a0bb407298fb425360a5c44a663774406340c59a22de4ce" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.9.1", + "bitflags 2.9.4", "lazy_static", "num-traits", "rand 0.9.0", "rand_chacha 0.9.0", - "rand_xorshift", + "rand_xorshift 0.4.0", "regex-syntax", "rusty-fork", "tempfile", @@ -5435,7 +5555,7 @@ dependencies = [ [[package]] name = "proto" -version = "3.0.0" +version = "3.1.0" dependencies = [ "protobuf-src", "tonic-build", @@ -5456,7 +5576,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e" dependencies = [ - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", ] [[package]] @@ -5467,7 +5587,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -5493,9 +5613,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quinn" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", "cfg_aliases", @@ -5503,9 +5623,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.31", - "socket2 0.5.10", - "thiserror 2.0.12", + "rustls 0.23.32", + "socket2 0.6.0", + "thiserror 2.0.16", "tokio", "tracing", "web-time", @@ -5513,9 +5633,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.12" +version = "0.11.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", "fastbloom", @@ -5524,11 +5644,11 @@ dependencies = [ "rand 0.9.0", "ring", "rustc-hash 2.0.0", - "rustls 0.23.31", + "rustls 0.23.32", "rustls-pki-types", "rustls-platform-verifier", "slab", - "thiserror 2.0.12", + "thiserror 2.0.16", "tinyvec", "tracing", "web-time", @@ -5706,6 +5826,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "rand_xorshift" version = "0.4.0" @@ -5730,14 +5859,14 @@ version = "11.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", ] [[package]] name = "rayon" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", @@ -5745,9 +5874,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -5755,7 +5884,7 @@ dependencies = [ [[package]] name = "rbpf-cli" -version = "3.0.0" +version = "3.1.0" [[package]] name = "rdrand" @@ -5811,9 +5940,9 @@ 
dependencies = [ [[package]] name = "regex" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -5868,7 +5997,7 @@ dependencies = [ "mime", "native-tls", "once_cell", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project-lite", "rustls 0.21.12", "rustls-pemfile", @@ -5882,7 +6011,7 @@ dependencies = [ "tokio-rustls 0.24.1", "tokio-util 0.7.16", "tower-service", - "url 2.5.4", + "url 2.5.7", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -5892,9 +6021,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.22" +version = "0.12.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" +checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" dependencies = [ "async-compression", "base64 0.22.1", @@ -5910,10 +6039,11 @@ dependencies = [ "hyper-util", "js-sys", "log", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project-lite", "quinn", - "rustls 0.23.31", + "rustls 0.23.32", + "rustls-native-certs", "rustls-pki-types", "serde", "serde_json", @@ -5925,7 +6055,7 @@ dependencies = [ "tower 0.5.2", "tower-http", "tower-service", - "url 2.5.4", + "url 2.5.7", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -5941,12 +6071,22 @@ dependencies = [ "anyhow", "async-trait", "http 1.1.0", - "reqwest 0.12.22", + "reqwest 0.12.23", "serde", "thiserror 1.0.69", "tower-service", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + [[package]] name = "ring" version = 
"0.17.14" @@ -5954,7 +6094,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", - "cfg-if 1.0.1", + "cfg-if 1.0.3", "getrandom 0.2.15", "libc", "untrusted", @@ -6034,7 +6174,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.26", + "semver 1.0.27", ] [[package]] @@ -6052,7 +6192,7 @@ version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.4.14", @@ -6065,7 +6205,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.9.2", @@ -6086,14 +6226,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.31" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.4", + "rustls-webpki 0.103.6", "subtle", "zeroize", ] @@ -6131,19 +6271,19 @@ dependencies = [ [[package]] name = "rustls-platform-verifier" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5467026f437b4cb2a533865eaa73eb840019a0916f4b9ec563c6e617e086c9" +checksum = "be59af91596cac372a6942530653ad0c3a246cdd491aaa9dcaee47f88d67d5a0" dependencies = [ "core-foundation 0.10.0", "core-foundation-sys", "jni", "log", "once_cell", 
- "rustls 0.23.31", + "rustls 0.23.32", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki 0.103.4", + "rustls-webpki 0.103.6", "security-framework 3.2.0", "security-framework-sys", "webpki-root-certs", @@ -6168,9 +6308,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.4" +version = "0.103.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" dependencies = [ "ring", "rustls-pki-types", @@ -6201,15 +6341,6 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" -[[package]] -name = "safe_arch" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" -dependencies = [ - "bytemuck", -] - [[package]] name = "same-file" version = "1.0.6" @@ -6251,13 +6382,27 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array 0.14.7", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "security-framework" version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -6270,7 +6415,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", 
"core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -6298,11 +6443,12 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" dependencies = [ "serde", + "serde_core", ] [[package]] @@ -6325,10 +6471,11 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" dependencies = [ + "serde_core", "serde_derive", ] @@ -6343,34 +6490,45 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.17" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" dependencies = [ "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] name = "serde_json" -version = "1.0.142" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] @@ -6406,9 +6564,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.14.0" +version = "3.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" +checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" dependencies = [ "serde", "serde_derive", @@ -6417,14 +6575,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.14.0" +version = "3.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" +checksum = "327ada00f7d64abaac1e55a6911e90cf665aa051b9a561c7006c157f4633135e" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -6445,7 +6603,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.10.0", + "indexmap 2.11.4", "itoa", "ryu", "serde", @@ -6474,7 +6632,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -6496,7 +6654,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.1", + "cfg-if 1.0.3", "cpufeatures", "digest 0.9.0", "opaque-debug 0.3.0", @@ -6508,7 +6666,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" 
dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "cpufeatures", "digest 0.10.7", ] @@ -6519,7 +6677,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "cpufeatures", "digest 0.10.7", ] @@ -6531,7 +6689,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.1", + "cfg-if 1.0.3", "cpufeatures", "digest 0.9.0", "opaque-debug 0.3.0", @@ -6543,7 +6701,7 @@ version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "cpufeatures", "digest 0.10.7", ] @@ -6623,6 +6781,16 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + [[package]] name = "simpl" version = "0.1.0" @@ -6653,9 +6821,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" @@ -6716,9 +6884,9 @@ dependencies = [ [[package]] name = "solana-account" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0f949fe4edaeaea78c844023bfc1c898e0b1f5a100f8a8d2d0f85d0a7b090258" +checksum = "f885ce7f937871ecb56aadbeaaec963b234a580b7d6ebbdb8fa4249a36f92433" dependencies = [ "bincode", "qualifier_attr", @@ -6729,7 +6897,7 @@ dependencies = [ "solana-clock", "solana-frozen-abi", "solana-frozen-abi-macro", - "solana-instruction", + "solana-instruction-error", "solana-logger", "solana-pubkey", "solana-sdk-ids", @@ -6738,7 +6906,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "3.0.0" +version = "3.1.0" dependencies = [ "Inflector", "assert_matches", @@ -6753,7 +6921,7 @@ dependencies = [ "solana-account-decoder-client-types", "solana-address-lookup-table-interface", "solana-clock", - "solana-config-program-client", + "solana-config-interface", "solana-epoch-schedule", "solana-fee-calculator", "solana-hash", @@ -6776,13 +6944,13 @@ dependencies = [ "spl-token-group-interface", "spl-token-interface", "spl-token-metadata-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", "zstd", ] [[package]] name = "solana-account-decoder-client-types" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "bs58", @@ -6796,9 +6964,9 @@ dependencies = [ [[package]] name = "solana-account-info" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8f5152a288ef1912300fc6efa6c2d1f9bb55d9398eb6c72326360b8063987da" +checksum = "82f4691b69b172c687d218dd2f1f23fc7ea5e9aa79df9ac26dab3d8dd829ce48" dependencies = [ "bincode", "serde", @@ -6809,7 +6977,7 @@ dependencies = [ [[package]] name = "solana-accounts-cluster-bench" -version = "3.0.0" +version = "3.1.0" dependencies = [ "clap 2.33.3", "log", @@ -6855,7 +7023,7 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-io-uring", "agave-reserved-account-keys", @@ -6870,14 +7038,14 @@ dependencies = [ "criterion", "crossbeam-channel", "dashmap", - "indexmap 2.10.0", + "indexmap 
2.11.4", "io-uring", "itertools 0.12.1", "libc", "libsecp256k1", "log", "lz4", - "memmap2 0.9.7", + "memmap2 0.9.8", "memoffset 0.9.1", "modular-bitfield", "num_cpus", @@ -6915,7 +7083,6 @@ dependencies = [ "solana-pubkey", "solana-rayon-threadlimit", "solana-rent", - "solana-rent-collector", "solana-reward-info", "solana-sdk-ids", "solana-sha256-hasher", @@ -6924,6 +7091,7 @@ dependencies = [ "solana-slot-hashes", "solana-slot-history", "solana-stake-program", + "solana-svm", "solana-svm-transaction", "solana-system-interface", "solana-sysvar", @@ -6939,15 +7107,40 @@ dependencies = [ "tar", "tempfile", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", "tikv-jemallocator", ] +[[package]] +name = "solana-address" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a7a457086457ea9db9a5199d719dc8734dc2d0342fad0d8f77633c31eb62f19" +dependencies = [ + "arbitrary", + "borsh", + "bytemuck", + "bytemuck_derive", + "curve25519-dalek 4.1.3", + "five8", + "five8_const", + "rand 0.8.5", + "serde", + "serde_derive", + "solana-atomic-u64", + "solana-define-syscall 3.0.0", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-program-error", + "solana-sanitize", + "solana-sha256-hasher", +] + [[package]] name = "solana-address-lookup-table-interface" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1673f67efe870b64a65cb39e6194be5b26527691ce5922909939961a6e6b395" +checksum = "e2f56cac5e70517a2f27d05e5100b20de7182473ffd0035b23ea273307905987" dependencies = [ "bincode", "bytemuck", @@ -6955,6 +7148,7 @@ dependencies = [ "serde_derive", "solana-clock", "solana-instruction", + "solana-instruction-error", "solana-pubkey", "solana-sdk-ids", "solana-slot-hashes", @@ -6962,16 +7156,16 @@ dependencies = [ [[package]] name = "solana-atomic-u64" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d52e52720efe60465b052b9e7445a01c17550666beec855cce66f44766697bc2" +checksum = "a933ff1e50aff72d02173cfcd7511bd8540b027ee720b75f353f594f834216d0" dependencies = [ "parking_lot 0.12.3", ] [[package]] name = "solana-banking-bench" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-banking-stage-ingress-types", "assert_matches", @@ -7007,9 +7201,9 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ - "borsh 1.5.7", + "borsh", "futures 0.3.31", "solana-account", "solana-banks-interface", @@ -7030,14 +7224,14 @@ dependencies = [ "solana-transaction-context", "solana-transaction-error", "tarpc", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-serde", ] [[package]] name = "solana-banks-interface" -version = "3.0.0" +version = "3.1.0" dependencies = [ "serde", "serde_derive", @@ -7056,7 +7250,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bincode", @@ -7084,7 +7278,7 @@ dependencies = [ [[package]] name = "solana-bench-streamer" -version = "3.0.0" +version = "3.1.0" dependencies = [ "clap 3.2.23", "crossbeam-channel", @@ -7096,7 +7290,7 @@ dependencies = [ [[package]] name = "solana-bench-tps" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "chrono", @@ -7157,13 +7351,13 @@ dependencies = [ "solana-version", "spl-instruction-padding-interface", "tempfile", - "thiserror 2.0.12", + "thiserror 2.0.16", "tikv-jemallocator", ] [[package]] name = "solana-bench-vote" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "clap 2.33.3", @@ -7183,45 +7377,45 @@ dependencies = [ "solana-version", "solana-vote-program", "tikv-jemallocator", + "tokio-util 0.7.16", ] [[package]] name = "solana-big-mod-exp" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"75db7f2bbac3e62cfd139065d15bcda9e2428883ba61fc8d27ccb251081e7567" +checksum = "30c80fb6d791b3925d5ec4bf23a7c169ef5090c013059ec3ed7d0b2c04efa085" dependencies = [ "num-bigint 0.4.6", "num-traits", - "solana-define-syscall", + "solana-define-syscall 3.0.0", ] [[package]] name = "solana-bincode" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19a3787b8cf9c9fe3dd360800e8b70982b9e5a8af9e11c354b6665dd4a003adc" +checksum = "534a37aecd21986089224d0c01006a75b96ac6fb2f418c24edc15baf0d2a4c99" dependencies = [ "bincode", "serde", - "solana-instruction", + "solana-instruction-error", ] [[package]] name = "solana-blake3-hasher" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0801e25a1b31a14494fc80882a036be0ffd290efc4c2d640bfcca120a4672" +checksum = "ffa2e3bdac3339c6d0423275e45dafc5ac25f4d43bf344d026a3cc9a85e244a6" dependencies = [ "blake3", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-hash", - "solana-sanitize", ] [[package]] name = "solana-bloom" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bencher", "bv", @@ -7239,34 +7433,59 @@ dependencies = [ "solana-time-utils", ] +[[package]] +name = "solana-bls-signatures" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a40ce56d14f58c3ebe9275c3739c4052748ec5c4922854c12dc823dbf450ebd1" +dependencies = [ + "base64 0.22.1", + "blst", + "blstrs", + "bytemuck", + "cfg_eval", + "ff", + "group", + "pairing", + "rand 0.8.5", + "serde", + "serde_json", + "serde_with", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-signature", + "solana-signer", + "subtle", + "thiserror 2.0.16", +] + [[package]] name = "solana-bn254" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4420f125118732833f36facf96a27e7b78314b2d642ba07fa9ffdacd8d79e243" +checksum = 
"20a5f01e99addb316d95d4ed31aa6eacfda557fffc00ae316b919e8ba0fc5b91" dependencies = [ "ark-bn254", "ark-ec", "ark-ff", "ark-serialize", "bytemuck", - "solana-define-syscall", - "thiserror 2.0.12", + "solana-define-syscall 3.0.0", + "thiserror 2.0.16", ] [[package]] name = "solana-borsh" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718333bcd0a1a7aed6655aa66bef8d7fb047944922b2d3a18f49cbc13e73d004" +checksum = "dc402b16657abbfa9991cd5cbfac5a11d809f7e7d28d3bb291baeb088b39060e" dependencies = [ - "borsh 0.10.3", - "borsh 1.5.7", + "borsh", ] [[package]] name = "solana-bpf-loader-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-syscalls", "assert_matches", @@ -7285,8 +7504,6 @@ dependencies = [ "solana-last-restart-slot", "solana-loader-v3-interface", "solana-loader-v4-interface", - "solana-log-collector", - "solana-measure", "solana-packet", "solana-program", "solana-program-entrypoint", @@ -7298,16 +7515,18 @@ dependencies = [ "solana-slot-hashes", "solana-svm-callback", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-type-overrides", "solana-system-interface", "solana-transaction-context", - "solana-type-overrides", "static_assertions", "test-case", ] [[package]] name = "solana-bpf-loader-program-tests" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "bincode", @@ -7327,13 +7546,13 @@ dependencies = [ [[package]] name = "solana-bucket-map" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bv", "bytemuck", "bytemuck_derive", "fs_extra", - "memmap2 0.9.7", + "memmap2 0.9.8", "modular-bitfield", "num_enum", "rand 0.8.5", @@ -7348,7 +7567,7 @@ dependencies = [ [[package]] name = "solana-builtins" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "solana-bpf-loader-program", @@ -7367,11 +7586,12 @@ dependencies = [ [[package]] name = "solana-builtins-default-costs" -version = 
"3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "ahash 0.8.11", "log", + "qualifier_attr", "rand 0.8.5", "solana-bpf-loader-program", "solana-compute-budget-program", @@ -7387,7 +7607,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-sbf" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_cmd", "bzip2", @@ -7397,8 +7617,8 @@ dependencies = [ "log", "predicates", "regex", - "reqwest 0.12.22", - "semver 1.0.26", + "reqwest 0.12.23", + "semver 1.0.27", "serial_test", "solana-file-download", "solana-keypair", @@ -7408,7 +7628,7 @@ dependencies = [ [[package]] name = "solana-cargo-test-sbf" -version = "3.0.0" +version = "3.1.0" dependencies = [ "cargo_metadata", "clap 3.2.23", @@ -7420,7 +7640,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "chrono", @@ -7442,15 +7662,15 @@ dependencies = [ "solana-signer", "solana-system-interface", "tempfile", - "thiserror 2.0.12", + "thiserror 2.0.16", "tiny-bip39", "uriparse", - "url 2.5.4", + "url 2.5.7", ] [[package]] name = "solana-clap-v3-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "chrono", @@ -7472,17 +7692,17 @@ dependencies = [ "solana-signature", "solana-signer", "solana-system-interface", - "solana-zk-sdk 3.0.0", + "solana-zk-sdk", "tempfile", - "thiserror 2.0.12", + "thiserror 2.0.16", "tiny-bip39", "uriparse", - "url 2.5.4", + "url 2.5.7", ] [[package]] name = "solana-cli" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-syscalls", @@ -7490,7 +7710,7 @@ dependencies = [ "bincode", "bs58", "clap 2.33.3", - "console 0.16.0", + "console 0.16.1", "const_format", "criterion-stats", "crossbeam-channel", @@ -7500,8 +7720,8 @@ dependencies = [ "log", "num-traits", "pretty-hex", - "reqwest 0.12.22", - "semver 1.0.26", + "reqwest 0.12.23", + "semver 1.0.27", "serde", "serde_derive", "serde_json", @@ -7518,11 +7738,9 @@ dependencies = [ 
"solana-commitment-config", "solana-compute-budget-interface", "solana-config-interface", - "solana-config-program-client", "solana-connection-cache", "solana-epoch-schedule", "solana-faucet", - "solana-feature-gate-client", "solana-feature-gate-interface", "solana-fee-calculator", "solana-fee-structure", @@ -7573,13 +7791,13 @@ dependencies = [ "spl-memo-interface", "tempfile", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", "tiny-bip39", ] [[package]] name = "solana-cli-config" -version = "3.0.0" +version = "3.1.0" dependencies = [ "anyhow", "dirs-next", @@ -7588,24 +7806,24 @@ dependencies = [ "serde_yaml 0.9.34+deprecated", "solana-clap-utils", "solana-commitment-config", - "url 2.5.4", + "url 2.5.7", ] [[package]] name = "solana-cli-output" -version = "3.0.0" +version = "3.1.0" dependencies = [ "Inflector", "agave-reserved-account-keys", "base64 0.22.1", "chrono", "clap 2.33.3", - "console 0.16.0", - "ed25519-dalek", + "console 0.16.1", + "ed25519-dalek 1.0.1", "humantime", "indicatif 0.18.0", "pretty-hex", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_json", "solana-account", @@ -7618,7 +7836,6 @@ dependencies = [ "solana-hash", "solana-keypair", "solana-message", - "solana-native-token", "solana-packet", "solana-pubkey", "solana-rpc-client-api", @@ -7627,7 +7844,6 @@ dependencies = [ "solana-signer", "solana-stake-interface", "solana-system-interface", - "solana-sysvar", "solana-transaction", "solana-transaction-context", "solana-transaction-error", @@ -7639,7 +7855,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "bincode", @@ -7647,7 +7863,7 @@ dependencies = [ "dashmap", "futures 0.3.31", "futures-util", - "indexmap 2.10.0", + "indexmap 2.11.4", "indicatif 0.18.0", "log", "quinn", @@ -7679,13 +7895,14 @@ dependencies = [ "solana-transaction-error", "solana-transaction-status-client-types", "solana-udp-client", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", 
+ "tokio-util 0.7.16", ] [[package]] name = "solana-client-test" -version = "3.0.0" +version = "3.1.0" dependencies = [ "futures-util", "serde_json", @@ -7722,9 +7939,9 @@ dependencies = [ [[package]] name = "solana-client-traits" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83f0071874e629f29e0eb3dab8a863e98502ac7aba55b7e0df1803fc5cac72a7" +checksum = "08618ed587e128105510c54ae3e456b9a06d674d8640db75afe66dad65cb4e02" dependencies = [ "solana-account", "solana-commitment-config", @@ -7743,9 +7960,9 @@ dependencies = [ [[package]] name = "solana-clock" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bb482ab70fced82ad3d7d3d87be33d466a3498eb8aa856434ff3c0dfc2e2e31" +checksum = "fb62e9381182459a4520b5fe7fb22d423cae736239a6427fc398a88743d0ed59" dependencies = [ "serde", "serde_derive", @@ -7756,9 +7973,9 @@ dependencies = [ [[package]] name = "solana-cluster-type" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ace9fea2daa28354d107ea879cff107181d85cd4e0f78a2bedb10e1a428c97e" +checksum = "eb7692fa6bf10a1a86b450c4775526f56d7e0e2116a53313f2533b5694abea64" dependencies = [ "serde", "serde_derive", @@ -7767,9 +7984,9 @@ dependencies = [ [[package]] name = "solana-commitment-config" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac49c4dde3edfa832de1697e9bcdb7c3b3f7cb7a1981b7c62526c8bb6700fb73" +checksum = "5fa5933a62dadb7d3ed35e6329de5cebb0678acc8f9cfdf413269084eeccc63f" dependencies = [ "serde", "serde_derive", @@ -7777,7 +7994,7 @@ dependencies = [ [[package]] name = "solana-compute-budget" -version = "3.0.0" +version = "3.1.0" dependencies = [ "qualifier_attr", "solana-fee-structure", @@ -7787,7 +8004,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-instruction" -version = "3.0.0" 
+version = "3.1.0" dependencies = [ "agave-feature-set", "bincode", @@ -7811,30 +8028,30 @@ dependencies = [ "solana-system-interface", "solana-transaction", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-compute-budget-interface" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8432d2c4c22d0499aa06d62e4f7e333f81777b3d7c96050ae9e5cb71a8c3aee4" +checksum = "8292c436b269ad23cecc8b24f7da3ab07ca111661e25e00ce0e1d22771951ab9" dependencies = [ - "borsh 1.5.7", + "borsh", "solana-instruction", "solana-sdk-ids", ] [[package]] name = "solana-compute-budget-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-program-runtime", ] [[package]] name = "solana-compute-budget-program-bench" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "criterion", @@ -7849,9 +8066,9 @@ dependencies = [ [[package]] name = "solana-config-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fbdbcfedb467322ac9686ca61da0a1fdede2fd99a01fb2ed52b49452abd22e0" +checksum = "63e401ae56aed512821cc7a0adaa412ff97fecd2dff4602be7b1330d2daec0c4" dependencies = [ "bincode", "serde", @@ -7864,29 +8081,15 @@ dependencies = [ "solana-system-interface", ] -[[package]] -name = "solana-config-program-client" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef9867b9ffae6e48a97ce6349e7796fcb34084298e909a8fa1fe427f41b52fd4" -dependencies = [ - "bincode", - "borsh 0.10.3", - "kaigan", - "serde", - "solana-config-interface", - "solana-program", -] - [[package]] name = "solana-connection-cache" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.10.0", + "indexmap 2.11.4", "indicatif 0.18.0", "log", "rand 0.8.5", @@ -7899,19 +8102,20 @@ dependencies = [ 
"solana-net-utils", "solana-time-utils", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-core" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-banking-stage-ingress-types", "agave-feature-set", "agave-reserved-account-keys", "agave-transaction-view", "agave-verified-packet-receiver", + "agave-votor", "ahash 0.8.11", "anyhow", "arrayvec", @@ -7920,6 +8124,7 @@ dependencies = [ "base64 0.22.1", "bincode", "bs58", + "bytemuck", "bytes", "chrono", "conditional-mod", @@ -7927,7 +8132,6 @@ dependencies = [ "crossbeam-channel", "dashmap", "derive_more 1.0.0", - "etcd-client", "fs_extra", "futures 0.3.31", "histogram", @@ -7944,7 +8148,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.31", + "rustls 0.23.32", "serde", "serde_bytes", "serde_derive", @@ -7960,6 +8164,7 @@ dependencies = [ "solana-builtins-default-costs", "solana-client", "solana-clock", + "solana-cluster-type", "solana-compute-budget", "solana-compute-budget-instruction", "solana-compute-budget-interface", @@ -7995,6 +8200,7 @@ dependencies = [ "solana-perf", "solana-poh", "solana-poh-config", + "solana-program-binaries", "solana-program-runtime", "solana-pubkey", "solana-quic-client", @@ -8018,13 +8224,13 @@ dependencies = [ "solana-stake-program", "solana-streamer", "solana-svm", + "solana-svm-timings", "solana-svm-transaction", "solana-system-interface", "solana-system-program", "solana-system-transaction", "solana-sysvar", "solana-time-utils", - "solana-timings", "solana-tls-utils", "solana-tpu-client", "solana-tpu-client-next", @@ -8047,7 +8253,7 @@ dependencies = [ "sysctl", "tempfile", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", "tikv-jemallocator", "tokio", "tokio-util 0.7.16", @@ -8056,7 +8262,7 @@ dependencies = [ [[package]] name = "solana-cost-model" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-reserved-account-keys", @@ -8102,12 +8308,12 
@@ dependencies = [ [[package]] name = "solana-cpi" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dc71126edddc2ba014622fc32d0f5e2e78ec6c5a1e0eb511b85618c09e9ea11" +checksum = "16238feb63d1cbdf915fb287f29ef7a7ebf81469bd6214f8b72a53866b593f8f" dependencies = [ "solana-account-info", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-instruction", "solana-program-error", "solana-pubkey", @@ -8116,50 +8322,47 @@ dependencies = [ [[package]] name = "solana-curve25519" -version = "2.2.15" +version = "2.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def3cfe5279edb64fc39111cff6dcf77b01fbfba2c02c13ced41e6a48baf4cbe" +checksum = "b162f50499b391b785d57b2f2c73e3b9754d88fd4894bef444960b00bda8dcca" dependencies = [ "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", - "solana-define-syscall", + "solana-define-syscall 2.3.0", "subtle", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-curve25519" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "subtle", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] -name = "solana-decode-error" -version = "2.2.1" +name = "solana-define-syscall" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a6a6383af236708048f8bd8d03db8ca4ff7baf4a48e5d580f4cce545925470" -dependencies = [ - "num-traits", -] +checksum = "2ae3e2abcf541c8122eafe9a625d4d194b4023c20adde1e251f94e056bb1aee2" [[package]] name = "solana-define-syscall" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ae3e2abcf541c8122eafe9a625d4d194b4023c20adde1e251f94e056bb1aee2" +checksum = "f9697086a4e102d28a156b8d6b521730335d6951bd39a5e766512bbe09007cee" [[package]] name = "solana-derivation-path" 
-version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "939756d798b25c5ec3cca10e06212bdca3b1443cb9bb740a38124f58b258737b" +checksum = "ff71743072690fdbdfcdc37700ae1cb77485aaad49019473a81aee099b1e0b8c" dependencies = [ "derivation-path", "qstring", @@ -8168,7 +8371,7 @@ dependencies = [ [[package]] name = "solana-dos" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "clap 3.2.23", @@ -8211,7 +8414,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "solana-clock", @@ -8222,25 +8425,22 @@ dependencies = [ [[package]] name = "solana-ed25519-program" -version = "2.2.3" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feafa1691ea3ae588f99056f4bdd1293212c7ece28243d7da257c443e84753" +checksum = "e1419197f1c06abf760043f6d64ba9d79a03ad5a43f18c7586471937122094da" dependencies = [ "bytemuck", "bytemuck_derive", - "ed25519-dalek", - "solana-feature-set", "solana-instruction", - "solana-precompile-error", "solana-sdk-ids", ] [[package]] name = "solana-ed25519-program-tests" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", - "ed25519-dalek", + "ed25519-dalek 1.0.1", "rand 0.8.5", "solana-ed25519-program", "solana-instruction", @@ -8253,7 +8453,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-reserved-account-keys", "assert_matches", @@ -8287,9 +8487,9 @@ dependencies = [ [[package]] name = "solana-epoch-info" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ef6f0b449290b0b9f32973eefd95af35b01c5c0c34c569f936c34c5b20d77b" +checksum = "f8a6b69bd71386f61344f2bcf0f527f5fd6dd3b22add5880e2e1bf1dd1fa8059" dependencies = [ "serde", "serde_derive", @@ -8297,9 +8497,9 @@ dependencies = [ [[package]] name = "solana-epoch-rewards" 
-version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b575d3dd323b9ea10bb6fe89bf6bf93e249b215ba8ed7f68f1a3633f384db7" +checksum = "b319a4ed70390af911090c020571f0ff1f4ec432522d05ab89f5c08080381995" dependencies = [ "serde", "serde_derive", @@ -8311,9 +8511,9 @@ dependencies = [ [[package]] name = "solana-epoch-rewards-hasher" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c5fd2662ae7574810904585fd443545ed2b568dbd304b25a31e79ccc76e81b" +checksum = "e507099d0c2c5d7870c9b1848281ea67bbeee80d171ca85003ee5767994c9c38" dependencies = [ "siphasher 0.3.11", "solana-hash", @@ -8322,9 +8522,9 @@ dependencies = [ [[package]] name = "solana-epoch-schedule" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fce071fbddecc55d727b1d7ed16a629afe4f6e4c217bc8d00af3b785f6f67ed" +checksum = "6e5481e72cc4d52c169db73e4c0cd16de8bc943078aac587ec4817a75cc6388f" dependencies = [ "serde", "serde_derive", @@ -8335,11 +8535,21 @@ dependencies = [ "solana-sysvar-id", ] +[[package]] +name = "solana-epoch-stake" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc6693d0ea833b880514b9b88d95afb80b42762dca98b0712465d1fcbbcb89e" +dependencies = [ + "solana-define-syscall 3.0.0", + "solana-pubkey", +] + [[package]] name = "solana-example-mocks" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84461d56cbb8bb8d539347151e0525b53910102e4bced875d49d5139708e39d3" +checksum = "978855d164845c1b0235d4b4d101cadc55373fffaf0b5b6cfa2194d25b2ed658" dependencies = [ "serde", "serde_derive", @@ -8353,12 +8563,12 @@ dependencies = [ "solana-pubkey", "solana-sdk-ids", "solana-system-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-faucet" -version = "3.0.0" +version = "3.1.0" 
dependencies = [ "bincode", "clap 2.33.3", @@ -8368,13 +8578,15 @@ dependencies = [ "serde_derive", "solana-clap-utils", "solana-cli-config", + "solana-cli-output", + "solana-faucet", "solana-hash", "solana-instruction", "solana-keypair", "solana-logger", "solana-message", "solana-metrics", - "solana-native-token", + "solana-net-utils", "solana-packet", "solana-pubkey", "solana-signer", @@ -8383,28 +8595,15 @@ dependencies = [ "solana-transaction", "solana-version", "spl-memo-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] -[[package]] -name = "solana-feature-gate-client" -version = "0.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1056507c534839b5cd1b1010ffedb9e8c92313269786fb5066ff53b30326dc3" -dependencies = [ - "borsh 0.10.3", - "num-derive", - "num-traits", - "solana-program", - "thiserror 2.0.12", -] - [[package]] name = "solana-feature-gate-interface" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f5c5382b449e8e4e3016fb05e418c53d57782d8b5c30aa372fc265654b956d" +checksum = "7347ab62e6d47a82e340c865133795b394feea7c2b2771d293f57691c6544c3f" dependencies = [ "bincode", "serde", @@ -8419,23 +8618,9 @@ dependencies = [ "solana-system-interface", ] -[[package]] -name = "solana-feature-set" -version = "2.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92f6c09cc41059c0e03ccbee7f5d4cc0a315d68ef0d59b67eb90246adfd8cc35" -dependencies = [ - "ahash 0.8.11", - "lazy_static", - "solana-epoch-schedule", - "solana-hash", - "solana-pubkey", - "solana-sha256-hasher", -] - [[package]] name = "solana-fee" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "solana-fee-structure", @@ -8444,9 +8629,9 @@ dependencies = [ [[package]] name = "solana-fee-calculator" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d89bc408da0fb3812bc3008189d148b4d3e08252c79ad810b245482a3f70cd8d" +checksum = "2a73cc03ca4bed871ca174558108835f8323e85917bb38b9c81c7af2ab853efe" dependencies = [ "log", "serde", @@ -8457,22 +8642,20 @@ dependencies = [ [[package]] name = "solana-fee-structure" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33adf673581c38e810bf618f745bf31b683a0a4a4377682e6aaac5d9a058dd4e" +checksum = "5e2abdb1223eea8ec64136f39cb1ffcf257e00f915c957c35c0dd9e3f4e700b0" dependencies = [ "serde", "serde_derive", "solana-frozen-abi", - "solana-message", - "solana-native-token", ] [[package]] name = "solana-file-download" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a9744774fdbd7ae8575e5bd6d5df6946f321fb9b6019410b300a515369a37d" +checksum = "842227f0ae5ebffdfe686597a909cb406d2bd9b92432c516503b8cbd490a3ea6" dependencies = [ "console 0.15.11", "indicatif 0.17.12", @@ -8482,9 +8665,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac93e831736e9cbd1571c5c692fa7533a304f184f77cba52e5b83c4c7eeebda" +checksum = "f19aad3b79cf84cd24de85e711ed1718de1e5bf46a710fa73179efa6a117d707" dependencies = [ "boxcar", "bs58", @@ -8499,23 +8682,23 @@ dependencies = [ "serde_with", "sha2 0.10.9", "solana-frozen-abi-macro", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-frozen-abi-macro" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b83f88a126213cbcb57672c5e70ddb9791eff9b480e9f39fe9285fd2abca66fa" +checksum = "d42809b90c84963eb5f2e17afafb1384892341b0d8ec12ae8f4a8c69a96138e4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] name = "solana-genesis" -version = "3.0.0" +version = "3.1.0" dependencies = [ 
"agave-feature-set", "base64 0.22.1", @@ -8531,6 +8714,7 @@ dependencies = [ "solana-clap-utils", "solana-cli-config", "solana-clock", + "solana-cluster-type", "solana-commitment-config", "solana-entry", "solana-epoch-schedule", @@ -8561,9 +8745,9 @@ dependencies = [ [[package]] name = "solana-genesis-config" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3725085d47b96d37fef07a29d78d2787fc89a0b9004c66eed7753d1e554989f" +checksum = "749eccc960e85c9b33608450093d256006253e1cb436b8380e71777840a3f675" dependencies = [ "bincode", "chrono", @@ -8578,7 +8762,6 @@ dependencies = [ "solana-hash", "solana-inflation", "solana-keypair", - "solana-logger", "solana-poh-config", "solana-pubkey", "solana-rent", @@ -8591,7 +8774,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "solana-accounts-db", @@ -8603,7 +8786,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-geyser-plugin-interface", "bs58", @@ -8627,15 +8810,16 @@ dependencies = [ "solana-signature", "solana-transaction", "solana-transaction-status", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-gossip" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", + "agave-low-pass-filter", "anyhow", "arrayvec", "assert_matches", @@ -8646,7 +8830,7 @@ dependencies = [ "criterion", "crossbeam-channel", "flate2", - "indexmap 2.10.0", + "indexmap 2.11.4", "itertools 0.12.1", "log", "lru", @@ -8663,10 +8847,12 @@ dependencies = [ "serde_derive", "serial_test", "siphasher 1.0.1", + "solana-account", "solana-bloom", "solana-clap-utils", "solana-client", "solana-clock", + "solana-cluster-type", "solana-connection-cache", "solana-entry", "solana-epoch-schedule", @@ -8704,14 +8890,14 @@ dependencies = [ "solana-vote-program", "static_assertions", "test-case", 
- "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-hard-forks" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c28371f878e2ead55611d8ba1b5fb879847156d04edea13693700ad1a28baf" +checksum = "0abacc4b66ce471f135f48f22facf75cbbb0f8a252fbe2c1e0aa59d5b203f519" dependencies = [ "serde", "serde_derive", @@ -8721,29 +8907,27 @@ dependencies = [ [[package]] name = "solana-hash" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b96e9f0300fa287b545613f007dfe20043d7812bee255f418c1eb649c93b63" +checksum = "8a063723b9e84c14d8c0d2cdf0268207dc7adecf546e31251f9e07c7b00b566c" dependencies = [ - "borsh 1.5.7", + "borsh", "bytemuck", "bytemuck_derive", "five8", - "js-sys", "serde", "serde_derive", "solana-atomic-u64", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-sanitize", - "wasm-bindgen", ] [[package]] name = "solana-inflation" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23eef6a09eb8e568ce6839573e4966850e85e9ce71e6ae1a6c930c1c43947de3" +checksum = "e92f37a14e7c660628752833250dd3dcd8e95309876aee751d7f8769a27947c6" dependencies = [ "serde", "serde_derive", @@ -8753,33 +8937,45 @@ dependencies = [ [[package]] name = "solana-instruction" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47298e2ce82876b64f71e9d13a46bc4b9056194e7f9937ad3084385befa50885" +checksum = "8df4e8fcba01d7efa647ed20a081c234475df5e11a93acb4393cc2c9a7b99bab" dependencies = [ "bincode", - "borsh 1.5.7", - "getrandom 0.2.15", - "js-sys", - "num-traits", + "borsh", "serde", "serde_derive", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-instruction-error", "solana-pubkey", - "wasm-bindgen", +] + +[[package]] +name = "solana-instruction-error" 
+version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f0d483b8ae387178d9210e0575b666b05cdd4bd0f2f188128249f6e454d39d" +dependencies = [ + "num-traits", + "serde", + "serde_derive", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-program-error", ] [[package]] name = "solana-instructions-sysvar" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0e85a6fad5c2d0c4f5b91d34b8ca47118fc593af706e523cdbedf846a954f57" +checksum = "7ddf67876c541aa1e21ee1acae35c95c6fbc61119814bfef70579317a5e26955" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "solana-account-info", "solana-instruction", + "solana-instruction-error", "solana-program-error", "solana-pubkey", "solana-sanitize", @@ -8790,19 +8986,18 @@ dependencies = [ [[package]] name = "solana-keccak-hasher" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7aeb957fbd42a451b99235df4942d96db7ef678e8d5061ef34c9b34cae12f79" +checksum = "57eebd3012946913c8c1b8b43cdf8a6249edb09c0b6be3604ae910332a3acd97" dependencies = [ "sha3", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-hash", - "solana-sanitize", ] [[package]] name = "solana-keygen" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bs58", "clap 3.2.23", @@ -8826,28 +9021,27 @@ dependencies = [ [[package]] name = "solana-keypair" -version = "2.2.1" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dbb7042c2e0c561afa07242b2099d55c57bd1b1da3b6476932197d84e15e3e4" +checksum = "952ed9074c12edd2060cb09c2a8c664303f4ab7f7056a407ac37dd1da7bdaa3e" dependencies = [ - "bs58", - "ed25519-dalek", + "ed25519-dalek 2.2.0", "ed25519-dalek-bip32", - "rand 0.7.3", + "five8", + "rand 0.8.5", "solana-derivation-path", "solana-pubkey", "solana-seed-derivable", "solana-seed-phrase", "solana-signature", "solana-signer", - 
"wasm-bindgen", ] [[package]] name = "solana-last-restart-slot" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a6360ac2fdc72e7463565cd256eedcf10d7ef0c28a1249d261ec168c1b55cdd" +checksum = "dcda154ec827f5fc1e4da0af3417951b7e9b8157540f81f936c4a8b1156134d0" dependencies = [ "serde", "serde_derive", @@ -8858,7 +9052,7 @@ dependencies = [ [[package]] name = "solana-lattice-hash" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "blake3", @@ -8871,18 +9065,20 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-reserved-account-keys", "anyhow", "assert_matches", "bincode", - "bitflags 2.9.1", + "bitflags 2.9.4", "bs58", + "bytes", "bzip2", "chrono", "chrono-humanize", + "conditional-mod", "criterion", "crossbeam-channel", "dashmap", @@ -8931,6 +9127,7 @@ dependencies = [ "solana-metrics", "solana-native-token", "solana-net-utils", + "solana-nohash-hasher", "solana-packet", "solana-perf", "solana-program-option", @@ -8951,11 +9148,11 @@ dependencies = [ "solana-storage-proto", "solana-streamer", "solana-svm", + "solana-svm-timings", "solana-svm-transaction", "solana-system-interface", "solana-system-transaction", "solana-time-utils", - "solana-timings", "solana-transaction", "solana-transaction-context", "solana-transaction-error", @@ -8970,7 +9167,7 @@ dependencies = [ "tar", "tempfile", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-stream", "trees", @@ -8978,9 +9175,9 @@ dependencies = [ [[package]] name = "solana-loader-v2-interface" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8ab08006dad78ae7cd30df8eea0539e207d08d91eaefb3e1d49a446e1c49654" +checksum = "1e4a6f0ad4fd9c30679bfee2ce3ea6a449cac38049f210480b751f65676dfe82" dependencies = [ "serde", "serde_bytes", @@ -8992,9 +9189,9 @@ dependencies = [ 
[[package]] name = "solana-loader-v3-interface" -version = "5.0.0" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f7162a05b8b0773156b443bccd674ea78bb9aa406325b467ea78c06c99a63a2" +checksum = "dee44c9b1328c5c712c68966fb8de07b47f3e7bac006e74ddd1bb053d3e46e5d" dependencies = [ "serde", "serde_bytes", @@ -9007,9 +9204,9 @@ dependencies = [ [[package]] name = "solana-loader-v4-interface" -version = "2.2.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "706a777242f1f39a83e2a96a2a6cb034cb41169c6ecbee2cf09cb873d9659e7e" +checksum = "e4c948b33ff81fa89699911b207059e493defdba9647eaf18f23abdf3674e0fb" dependencies = [ "serde", "serde_bytes", @@ -9022,7 +9219,7 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "log", @@ -9034,21 +9231,21 @@ dependencies = [ "solana-instruction", "solana-loader-v3-interface", "solana-loader-v4-interface", - "solana-log-collector", - "solana-measure", "solana-packet", "solana-program-runtime", "solana-pubkey", "solana-sbpf", "solana-sdk-ids", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-type-overrides", "solana-sysvar", "solana-transaction-context", - "solana-type-overrides", ] [[package]] name = "solana-local-cluster" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "crossbeam-channel", @@ -9064,6 +9261,7 @@ dependencies = [ "solana-client", "solana-client-traits", "solana-clock", + "solana-cluster-type", "solana-commitment-config", "solana-core", "solana-download-utils", @@ -9081,9 +9279,11 @@ dependencies = [ "solana-native-token", "solana-net-utils", "solana-poh-config", + "solana-program-binaries", "solana-pubkey", "solana-pubsub-client", "solana-quic-client", + "solana-rent", "solana-rpc-client", "solana-rpc-client-api", "solana-runtime", @@ -9111,33 +9311,13 @@ dependencies = [ "trees", ] -[[package]] -name = 
"solana-log-analyzer" -version = "3.0.0" -dependencies = [ - "byte-unit", - "clap 3.2.23", - "serde", - "serde_derive", - "serde_json", - "solana-logger", - "solana-version", -] - -[[package]] -name = "solana-log-collector" -version = "3.0.0" -dependencies = [ - "log", -] - [[package]] name = "solana-logger" -version = "2.3.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8e777ec1afd733939b532a42492d888ec7c88d8b4127a5d867eb45c6eb5cd5" +checksum = "ef7421d1092680d72065edbf5c7605856719b021bf5f173656c71febcdd5d003" dependencies = [ - "env_logger 0.9.3", + "env_logger", "lazy_static", "libc", "log", @@ -9146,15 +9326,15 @@ dependencies = [ [[package]] name = "solana-measure" -version = "3.0.0" +version = "3.1.0" [[package]] name = "solana-memory-management" -version = "3.0.0" +version = "3.1.0" [[package]] name = "solana-merkle-tree" -version = "3.0.0" +version = "3.1.0" dependencies = [ "fast-math", "hex", @@ -9164,79 +9344,66 @@ dependencies = [ [[package]] name = "solana-message" -version = "2.4.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1796aabce376ff74bf89b78d268fa5e683d7d7a96a0a4e4813ec34de49d5314b" +checksum = "85666605c9fd727f865ed381665db0a8fc29f984a030ecc1e40f43bfb2541623" dependencies = [ "bincode", "blake3", "lazy_static", "serde", "serde_derive", - "solana-bincode", + "solana-address", "solana-hash", "solana-instruction", - "solana-pubkey", "solana-sanitize", "solana-sdk-ids", "solana-short-vec", - "solana-system-interface", "solana-transaction-error", - "wasm-bindgen", ] [[package]] name = "solana-metrics" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bencher", "crossbeam-channel", - "env_logger 0.11.8", + "env_logger", "gethostname", "log", "rand 0.8.5", - "reqwest 0.12.22", + "reqwest 0.12.23", "serial_test", "solana-cluster-type", "solana-sha256-hasher", "solana-time-utils", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] 
name = "solana-msg" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36a1a14399afaabc2781a1db09cb14ee4cc4ee5c7a5a3cfcc601811379a8092" +checksum = "264275c556ea7e22b9d3f87d56305546a38d4eee8ec884f3b126236cb7dcbbb4" dependencies = [ - "solana-define-syscall", + "solana-define-syscall 3.0.0", ] [[package]] name = "solana-native-token" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307fb2f78060995979e9b4f68f833623565ed4e55d3725f100454ce78a99a1a3" - -[[package]] -name = "solana-net-shaper" version = "3.0.0" -dependencies = [ - "clap 3.2.23", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "solana-logger", -] +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae8dd4c280dca9d046139eb5b7a5ac9ad10403fbd64964c7d7571214950d758f" [[package]] name = "solana-net-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "anyhow", "bincode", "bytes", + "cfg-if 1.0.3", + "dashmap", "hxdmp", "itertools 0.12.1", "log", @@ -9245,11 +9412,13 @@ dependencies = [ "rand 0.8.5", "serde", "serde_derive", + "shuttle", "socket2 0.6.0", "solana-logger", "solana-serde", + "solana-svm-type-overrides", "tokio", - "url 2.5.4", + "url 2.5.7", ] [[package]] @@ -9260,9 +9429,9 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" [[package]] name = "solana-nonce" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703e22eb185537e06204a5bd9d509b948f0066f2d1d814a6f475dafb3ddf1325" +checksum = "abbdc6c8caf1c08db9f36a50967539d0f72b9f1d4aea04fec5430f532e5afadc" dependencies = [ "serde", "serde_derive", @@ -9274,9 +9443,9 @@ dependencies = [ [[package]] name = "solana-nonce-account" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cde971a20b8dbf60144d6a84439dda86b5466e00e2843091fe731083cda614da" +checksum = "805fd25b29e5a1a0e6c3dd6320c9da80f275fbe4ff6e392617c303a2085c435e" dependencies = [ "solana-account", "solana-hash", @@ -9286,19 +9455,19 @@ dependencies = [ [[package]] name = "solana-notifier" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", - "reqwest 0.12.22", + "reqwest 0.12.23", "serde_json", "solana-hash", ] [[package]] name = "solana-offchain-message" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b526398ade5dea37f1f147ce55dae49aa017a5d7326606359b0445ca8d946581" +checksum = "f6e2a1141a673f72a05cf406b99e4b2b8a457792b7c01afa07b3f00d4e2de393" dependencies = [ "num_enum", "solana-hash", @@ -9312,12 +9481,12 @@ dependencies = [ [[package]] name = "solana-packet" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "004f2d2daf407b3ec1a1ca5ec34b3ccdfd6866dd2d3c7d0715004a96e4b6d127" +checksum = "6edf2f25743c95229ac0fdc32f8f5893ef738dbf332c669e9861d33ddb0f469d" dependencies = [ "bincode", - "bitflags 2.9.1", + "bitflags 2.9.4", "cfg_eval", "serde", "serde_derive", @@ -9328,7 +9497,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "3.0.0" +version = "3.1.0" dependencies = [ "ahash 0.8.11", "assert_matches", @@ -9375,7 +9544,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "3.0.0" +version = "3.1.0" dependencies = [ "arc-swap", "assert_matches", @@ -9404,13 +9573,13 @@ dependencies = [ "solana-system-transaction", "solana-time-utils", "solana-transaction", - "thiserror 2.0.12", + "thiserror 2.0.16", "tikv-jemallocator", ] [[package]] name = "solana-poh-bench" -version = "3.0.0" +version = "3.1.0" dependencies = [ "clap 3.2.23", "log", @@ -9426,9 +9595,9 @@ dependencies = [ [[package]] name = "solana-poh-config" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d650c3b4b9060082ac6b0efbbb66865089c58405bfb45de449f3f2b91eccee75" +checksum = "2f1fef1f2ff2480fdbcc64bef5e3c47bec6e1647270db88b43f23e3a55f8d9cf" dependencies = [ "serde", "serde_derive", @@ -9436,46 +9605,28 @@ dependencies = [ [[package]] name = "solana-poseidon" -version = "3.0.0" +version = "3.1.0" dependencies = [ "ark-bn254", "light-poseidon", - "solana-define-syscall", - "thiserror 2.0.12", + "solana-define-syscall 3.0.0", + "thiserror 2.0.16", ] [[package]] name = "solana-precompile-error" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d87b2c1f5de77dfe2b175ee8dd318d196aaca4d0f66f02842f80c852811f9f8" +checksum = "cafcd950de74c6c39d55dc8ca108bbb007799842ab370ef26cf45a34453c31e1" dependencies = [ "num-traits", - "solana-decode-error", ] [[package]] -name = "solana-precompiles" -version = "2.2.1" +name = "solana-presigner" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a460ab805ec063802105b463ecb5eb02c3ffe469e67a967eea8a6e778e0bc06" -dependencies = [ - "lazy_static", - "solana-ed25519-program", - "solana-feature-set", - "solana-message", - "solana-precompile-error", - "solana-pubkey", - "solana-sdk-ids", - "solana-secp256k1-program", - "solana-secp256r1-program", -] - -[[package]] -name = "solana-presigner" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a57a24e6a4125fc69510b6774cd93402b943191b6cddad05de7281491c90fe" +checksum = "0f704eaf825be3180832445b9e4983b875340696e8e7239bf2d535b0f86c14a2" dependencies = [ "solana-pubkey", "solana-signature", @@ -9484,57 +9635,30 @@ dependencies = [ [[package]] name = "solana-program" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98eca145bd3545e2fbb07166e895370576e47a00a7d824e325390d33bf467210" +checksum = "91b12305dd81045d705f427acd0435a2e46444b65367d7179d7bdcfc3bc5f5eb" 
dependencies = [ - "bincode", - "blake3", - "borsh 0.10.3", - "borsh 1.5.7", - "bs58", - "bytemuck", - "console_error_panic_hook", - "console_log", - "getrandom 0.2.15", - "lazy_static", - "log", "memoffset 0.9.1", - "num-bigint 0.4.6", - "num-derive", - "num-traits", - "rand 0.8.5", - "serde", - "serde_bytes", - "serde_derive", "solana-account-info", - "solana-address-lookup-table-interface", - "solana-atomic-u64", "solana-big-mod-exp", - "solana-bincode", "solana-blake3-hasher", - "solana-borsh", "solana-clock", "solana-cpi", - "solana-decode-error", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-epoch-rewards", "solana-epoch-schedule", + "solana-epoch-stake", "solana-example-mocks", - "solana-feature-gate-interface", "solana-fee-calculator", "solana-hash", "solana-instruction", + "solana-instruction-error", "solana-instructions-sysvar", "solana-keccak-hasher", "solana-last-restart-slot", - "solana-loader-v2-interface", - "solana-loader-v3-interface", - "solana-loader-v4-interface", - "solana-message", "solana-msg", "solana-native-token", - "solana-nonce", "solana-program-entrypoint", "solana-program-error", "solana-program-memory", @@ -9542,9 +9666,7 @@ dependencies = [ "solana-program-pack", "solana-pubkey", "solana-rent", - "solana-sanitize", "solana-sdk-ids", - "solana-sdk-macro", "solana-secp256k1-recover", "solana-serde-varint", "solana-serialize-utils", @@ -9553,22 +9675,32 @@ dependencies = [ "solana-slot-hashes", "solana-slot-history", "solana-stable-layout", - "solana-stake-interface", - "solana-system-interface", "solana-sysvar", "solana-sysvar-id", - "solana-vote-interface", - "thiserror 2.0.12", - "wasm-bindgen", +] + +[[package]] +name = "solana-program-binaries" +version = "3.1.0" +dependencies = [ + "bincode", + "serde", + "solana-account", + "solana-loader-v3-interface", + "solana-pubkey", + "solana-rent", + "solana-sdk-ids", + "spl-generic-token", ] [[package]] name = "solana-program-entrypoint" -version = "2.3.0" +version = 
"3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ce041b1a0ed275290a5008ee1a4a6c48f5054c8a3d78d313c08958a06aedbd" +checksum = "6557cf5b5e91745d1667447438a1baa7823c6086e4ece67f8e6ebfa7a8f72660" dependencies = [ "solana-account-info", + "solana-define-syscall 3.0.0", "solana-msg", "solana-program-error", "solana-pubkey", @@ -9576,52 +9708,46 @@ dependencies = [ [[package]] name = "solana-program-error" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ee2e0217d642e2ea4bee237f37bd61bb02aec60da3647c48ff88f6556ade775" +checksum = "a1af32c995a7b692a915bb7414d5f8e838450cf7c70414e763d8abcae7b51f28" dependencies = [ - "borsh 1.5.7", - "num-traits", + "borsh", "serde", "serde_derive", - "solana-decode-error", - "solana-instruction", - "solana-msg", - "solana-pubkey", ] [[package]] name = "solana-program-memory" -version = "2.3.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a5426090c6f3fd6cfdc10685322fede9ca8e5af43cd6a59e98bfe4e91671712" +checksum = "10e5660c60749c7bfb30b447542529758e4dbcecd31b1e8af1fdc92e2bdde90a" dependencies = [ - "solana-define-syscall", + "solana-define-syscall 3.0.0", ] [[package]] name = "solana-program-option" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc677a2e9bc616eda6dbdab834d463372b92848b2bfe4a1ed4e4b4adba3397d0" +checksum = "8e7b4ddb464f274deb4a497712664c3b612e3f5f82471d4e47710fc4ab1c3095" [[package]] name = "solana-program-pack" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "319f0ef15e6e12dc37c597faccb7d62525a509fec5f6975ecb9419efddeb277b" +checksum = "c169359de21f6034a63ebf96d6b380980307df17a8d371344ff04a883ec4e9d0" dependencies = [ "solana-program-error", ] [[package]] name = "solana-program-runtime" -version = "3.0.0" +version = "3.1.0" 
dependencies = [ "assert_matches", "base64 0.22.1", "bincode", - "enum-iterator", "itertools 0.12.1", "log", "percentage", @@ -9637,34 +9763,39 @@ dependencies = [ "solana-frozen-abi-macro", "solana-hash", "solana-instruction", + "solana-instruction-error", + "solana-keypair", "solana-last-restart-slot", - "solana-log-collector", - "solana-measure", - "solana-metrics", + "solana-loader-v3-interface", "solana-program-entrypoint", "solana-program-runtime", "solana-pubkey", "solana-rent", "solana-sbpf", "solana-sdk-ids", + "solana-signer", "solana-slot-hashes", "solana-stable-layout", + "solana-stake-interface", "solana-svm-callback", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-timings", "solana-svm-transaction", + "solana-svm-type-overrides", "solana-system-interface", "solana-sysvar", "solana-sysvar-id", - "solana-timings", + "solana-transaction", "solana-transaction-context", - "solana-type-overrides", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-program-test" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "assert_matches", @@ -9682,6 +9813,7 @@ dependencies = [ "solana-banks-interface", "solana-banks-server", "solana-clock", + "solana-cluster-type", "solana-commitment-config", "solana-compute-budget", "solana-cpi", @@ -9693,13 +9825,13 @@ dependencies = [ "solana-instruction", "solana-keypair", "solana-loader-v3-interface", - "solana-log-collector", "solana-logger", "solana-message", "solana-msg", "solana-native-token", "solana-poh-config", "solana-program", + "solana-program-binaries", "solana-program-entrypoint", "solana-program-error", "solana-program-runtime", @@ -9713,60 +9845,41 @@ dependencies = [ "solana-stake-interface", "solana-stake-program", "solana-svm", + "solana-svm-log-collector", + "solana-svm-timings", "solana-system-interface", "solana-sysvar", "solana-sysvar-id", - "solana-timings", "solana-transaction", 
"solana-transaction-context", "solana-transaction-error", "solana-vote-program", "spl-generic-token", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-pubkey" -version = "2.4.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b62adb9c3261a052ca1f999398c388f1daf558a1b492f60a6d9e64857db4ff1" +checksum = "8909d399deb0851aa524420beeb5646b115fd253ef446e35fe4504c904da3941" dependencies = [ - "arbitrary", - "borsh 0.10.3", - "borsh 1.5.7", - "bytemuck", - "bytemuck_derive", - "curve25519-dalek 4.1.3", - "five8", - "five8_const", - "getrandom 0.2.15", - "js-sys", - "num-traits", "rand 0.8.5", - "serde", - "serde_derive", - "solana-atomic-u64", - "solana-decode-error", - "solana-define-syscall", - "solana-frozen-abi", - "solana-frozen-abi-macro", - "solana-sanitize", - "solana-sha256-hasher", - "wasm-bindgen", + "solana-address", ] [[package]] name = "solana-pubsub-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "anyhow", "crossbeam-channel", "futures-util", "http 0.2.12", "log", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_derive", "serde_json", @@ -9776,17 +9889,17 @@ dependencies = [ "solana-pubkey", "solana-rpc-client-types", "solana-signature", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-stream", "tokio-tungstenite", "tungstenite", - "url 2.5.4", + "url 2.5.7", ] [[package]] name = "solana-quic-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-lock", "async-trait", @@ -9796,7 +9909,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.31", + "rustls 0.23.32", "solana-connection-cache", "solana-keypair", "solana-logger", @@ -9812,22 +9925,23 @@ dependencies = [ "solana-streamer", "solana-tls-utils", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", + "tokio-util 0.7.16", ] [[package]] name = "solana-quic-definitions" -version = "2.3.0" +version = "3.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7011ee2af2baad991762b6d63ea94b08d06f7928effb76ce273b232c9902c205" +checksum = "15319accf7d3afd845817aeffa6edd8cc185f135cefbc6b985df29cfd8c09609" dependencies = [ "solana-keypair", ] [[package]] name = "solana-rayon-threadlimit" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "num_cpus", @@ -9835,10 +9949,10 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", - "console 0.16.0", + "console 0.16.1", "dialoguer", "hidapi", "log", @@ -9846,21 +9960,21 @@ dependencies = [ "num-traits", "parking_lot 0.12.3", "qstring", - "semver 1.0.26", + "semver 1.0.27", "solana-derivation-path", "solana-offchain-message", "solana-pubkey", "solana-signature", "solana-signer", - "thiserror 2.0.12", + "thiserror 2.0.16", "uriparse", ] [[package]] name = "solana-rent" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1aea8fdea9de98ca6e8c2da5827707fb3842833521b528a713810ca685d2480" +checksum = "b702d8c43711e3c8a9284a4f1bbc6a3de2553deb25b0c8142f9a44ef0ce5ddc1" dependencies = [ "serde", "serde_derive", @@ -9871,30 +9985,11 @@ dependencies = [ "solana-sysvar-id", ] -[[package]] -name = "solana-rent-collector" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c1e19f5d5108b0d824244425e43bc78bbb9476e2199e979b0230c9f632d3bf4" -dependencies = [ - "serde", - "serde_derive", - "solana-account", - "solana-clock", - "solana-epoch-schedule", - "solana-frozen-abi", - "solana-frozen-abi-macro", - "solana-genesis-config", - "solana-pubkey", - "solana-rent", - "solana-sdk-ids", -] - [[package]] name = "solana-reward-info" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18205b69139b1ae0ab8f6e11cdcb627328c0814422ad2482000fa2ca54ae4a2f" +checksum = 
"82be7946105c2ee6be9f9ee7bd18a068b558389221d29efa92b906476102bfcc" dependencies = [ "serde", "serde_derive", @@ -9902,7 +9997,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-reserved-account-keys", @@ -9930,8 +10025,10 @@ dependencies = [ "solana-account-decoder", "solana-accounts-db", "solana-address-lookup-table-interface", + "solana-cli-output", "solana-client", "solana-clock", + "solana-cluster-type", "solana-commitment-config", "solana-compute-budget-interface", "solana-entry", @@ -9947,7 +10044,6 @@ dependencies = [ "solana-instruction", "solana-keypair", "solana-ledger", - "solana-log-collector", "solana-measure", "solana-message", "solana-metrics", @@ -9980,6 +10076,7 @@ dependencies = [ "solana-storage-bigtable", "solana-streamer", "solana-svm", + "solana-svm-log-collector", "solana-system-interface", "solana-system-transaction", "solana-sysvar", @@ -10001,14 +10098,14 @@ dependencies = [ "stream-cancel", "symlink", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-util 0.7.16", ] [[package]] name = "solana-rpc-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "async-trait", @@ -10021,9 +10118,9 @@ dependencies = [ "jsonrpc-core", "jsonrpc-http-server", "log", - "reqwest 0.12.22", + "reqwest 0.12.23", "reqwest-middleware", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_derive", "serde_json", @@ -10050,16 +10147,17 @@ dependencies = [ "solana-version", "solana-vote-interface", "static_assertions", + "test-case", "tokio", ] [[package]] name = "solana-rpc-client-api" -version = "3.0.0" +version = "3.1.0" dependencies = [ "anyhow", "jsonrpc-core", - "reqwest 0.12.22", + "reqwest 0.12.23", "reqwest-middleware", "serde", "serde_derive", @@ -10071,12 +10169,12 @@ dependencies = [ "solana-transaction-error", "solana-transaction-status-client-types", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", ] 
[[package]] name = "solana-rpc-client-nonce-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "anyhow", "clap 2.33.3", @@ -10099,23 +10197,24 @@ dependencies = [ "solana-signer", "solana-system-interface", "solana-transaction", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-rpc-client-types" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "bs58", "const_format", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_derive", "serde_json", "solana-account", "solana-account-decoder-client-types", + "solana-address", "solana-clock", "solana-commitment-config", "solana-fee-calculator", @@ -10125,19 +10224,19 @@ dependencies = [ "solana-transaction-status-client-types", "solana-version", "spl-generic-token", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-rpc-test" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "bs58", "crossbeam-channel", "futures-util", "log", - "reqwest 0.12.22", + "reqwest 0.12.23", "serde", "serde_json", "solana-account-decoder", @@ -10168,7 +10267,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-precompiles", @@ -10188,7 +10287,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "dir-diff", - "ed25519-dalek", + "ed25519-dalek 1.0.1", "fnv", "im", "itertools 0.12.1", @@ -10196,7 +10295,7 @@ dependencies = [ "libsecp256k1", "log", "lz4", - "memmap2 0.9.7", + "memmap2 0.9.8", "memoffset 0.9.1", "mockall", "modular-bitfield", @@ -10211,6 +10310,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "regex", + "semver 1.0.27", "serde", "serde_derive", "serde_json", @@ -10219,11 +10319,13 @@ dependencies = [ "solana-account-info", "solana-accounts-db", "solana-address-lookup-table-interface", + "solana-bls-signatures", "solana-bpf-loader-program", "solana-bucket-map", "solana-builtins", "solana-client-traits", "solana-clock", + "solana-cluster-type", 
"solana-commitment-config", "solana-compute-budget", "solana-compute-budget-instruction", @@ -10245,6 +10347,7 @@ dependencies = [ "solana-hash", "solana-inflation", "solana-instruction", + "solana-instruction-error", "solana-keypair", "solana-lattice-hash", "solana-loader-v3-interface", @@ -10261,11 +10364,11 @@ dependencies = [ "solana-perf", "solana-poh-config", "solana-precompile-error", + "solana-program-binaries", "solana-program-runtime", "solana-pubkey", "solana-rayon-threadlimit", "solana-rent", - "solana-rent-collector", "solana-reward-info", "solana-runtime", "solana-runtime-transaction", @@ -10282,6 +10385,7 @@ dependencies = [ "solana-stake-program", "solana-svm", "solana-svm-callback", + "solana-svm-timings", "solana-svm-transaction", "solana-system-interface", "solana-system-program", @@ -10289,7 +10393,6 @@ dependencies = [ "solana-sysvar", "solana-sysvar-id", "solana-time-utils", - "solana-timings", "solana-transaction", "solana-transaction-context", "solana-transaction-error", @@ -10307,13 +10410,13 @@ dependencies = [ "tar", "tempfile", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", "zstd", ] [[package]] name = "solana-runtime-transaction" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-reserved-account-keys", @@ -10339,20 +10442,20 @@ dependencies = [ "solana-transaction", "solana-transaction-error", "solana-vote-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-sanitize" -version = "2.2.1" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f1bc1357b8188d9c4a3af3fc55276e56987265eb7ad073ae6f8180ee54cecf" +checksum = "dcf09694a0fc14e5ffb18f9b7b7c0f15ecb6eac5b5610bf76a1853459d19daf9" [[package]] name = "solana-sbpf" -version = "0.12.0" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7a3d3cff34df928b804917bf111d3ede779af406703580cd7ed8fb239f5acf" +checksum = 
"0f224d906c14efc7ed7f42bc5fe9588f3f09db8cabe7f6023adda62a69678e1a" dependencies = [ "byteorder", "combine 3.8.1", @@ -10363,89 +10466,85 @@ dependencies = [ "rand 0.8.5", "rustc-demangle", "shuttle", - "thiserror 2.0.12", + "thiserror 2.0.16", "winapi 0.3.9", ] [[package]] name = "solana-sdk-ids" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5d8b9cc68d5c88b062a33e23a6466722467dde0035152d8fb1afbcdf350a5f" +checksum = "b1b6d6aaf60669c592838d382266b173881c65fb1cdec83b37cb8ce7cb89f9ad" dependencies = [ "solana-pubkey", ] [[package]] name = "solana-sdk-macro" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86280da8b99d03560f6ab5aca9de2e38805681df34e0bb8f238e69b29433b9df" +checksum = "d6430000e97083460b71d9fbadc52a2ab2f88f53b3a4c5e58c5ae3640a0e8c00" dependencies = [ "bs58", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] name = "solana-secp256k1-program" -version = "2.2.3" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f19833e4bc21558fe9ec61f239553abe7d05224347b57d65c2218aeeb82d6149" +checksum = "8efa767b0188f577edae7080e8bf080e5db9458e2b6ee5beaa73e2e6bb54e99d" dependencies = [ "bincode", "digest 0.10.7", - "libsecp256k1", + "k256", "serde", "serde_derive", "sha3", - "solana-feature-set", "solana-instruction", - "solana-precompile-error", "solana-sdk-ids", "solana-signature", ] [[package]] name = "solana-secp256k1-recover" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baa3120b6cdaa270f39444f5093a90a7b03d296d362878f7a6991d6de3bbe496" +checksum = "394a4470477d66296af5217970a905b1c5569032a7732c367fb69e5666c8607e" dependencies = [ - "libsecp256k1", - "solana-define-syscall", - "thiserror 2.0.12", + "k256", + "solana-define-syscall 3.0.0", + "thiserror 2.0.16", ] [[package]] name = 
"solana-secp256r1-program" -version = "2.2.4" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce0ae46da3071a900f02d367d99b2f3058fe2e90c5062ac50c4f20cfedad8f0f" +checksum = "445d8e12592631d76fc4dc57858bae66c9fd7cc838c306c62a472547fc9d0ce6" dependencies = [ "bytemuck", "openssl", - "solana-feature-set", "solana-instruction", - "solana-precompile-error", "solana-sdk-ids", ] [[package]] name = "solana-seed-derivable" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3beb82b5adb266c6ea90e5cf3967235644848eac476c5a1f2f9283a143b7c97f" +checksum = "ff7bdb72758e3bec33ed0e2658a920f1f35dfb9ed576b951d20d63cb61ecd95c" dependencies = [ "solana-derivation-path", ] [[package]] name = "solana-seed-phrase" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36187af2324f079f65a675ec22b31c24919cb4ac22c79472e85d819db9bbbc15" +checksum = "dc905b200a95f2ea9146e43f2a7181e3aeb55de6bc12afb36462d00a3c7310de" dependencies = [ "hmac 0.12.1", "pbkdf2 0.11.0", @@ -10454,7 +10553,7 @@ dependencies = [ [[package]] name = "solana-send-transaction-service" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "crossbeam-channel", @@ -10489,49 +10588,49 @@ dependencies = [ [[package]] name = "solana-serde" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1931484a408af466e14171556a47adaa215953c7f48b24e5f6b0282763818b04" +checksum = "709a93cab694c70f40b279d497639788fc2ccbcf9b4aa32273d4b361322c02dd" dependencies = [ "serde", ] [[package]] name = "solana-serde-varint" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a7e155eba458ecfb0107b98236088c3764a09ddf0201ec29e52a0be40857113" +checksum = "3e5174c57d5ff3c1995f274d17156964664566e2cde18a07bba1586d35a70d3b" dependencies = [ 
"serde", ] [[package]] name = "solana-serialize-utils" -version = "2.2.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "817a284b63197d2b27afdba829c5ab34231da4a9b4e763466a003c40ca4f535e" +checksum = "56e41dd8feea239516c623a02f0a81c2367f4b604d7965237fed0751aeec33ed" dependencies = [ - "solana-instruction", + "solana-instruction-error", "solana-pubkey", "solana-sanitize", ] [[package]] name = "solana-sha256-hasher" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa3feb32c28765f6aa1ce8f3feac30936f16c5c3f7eb73d63a5b8f6f8ecdc44" +checksum = "a9b912ba6f71cb202c0c3773ec77bf898fa9fe0c78691a2d6859b3b5b8954719" dependencies = [ "sha2 0.10.9", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-hash", ] [[package]] name = "solana-short-vec" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c54c66f19b9766a56fa0057d060de8378676cb64987533fa088861858fc5a69" +checksum = "b69d029da5428fc1c57f7d49101b2077c61f049d4112cd5fb8456567cc7d2638" dependencies = [ "serde", "solana-frozen-abi", @@ -10540,9 +10639,9 @@ dependencies = [ [[package]] name = "solana-shred-version" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afd3db0461089d1ad1a78d9ba3f15b563899ca2386351d38428faa5350c60a98" +checksum = "94953e22ca28fe4541a3447d6baeaf519cc4ddc063253bfa673b721f34c136bb" dependencies = [ "solana-hard-forks", "solana-hash", @@ -10551,11 +10650,11 @@ dependencies = [ [[package]] name = "solana-signature" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c8ec8e657aecfc187522fc67495142c12f35e55ddeca8698edbb738b8dbd8c" +checksum = "4bb8057cc0e9f7b5e89883d49de6f407df655bb6f3a71d0b7baf9986a2218fd9" dependencies = [ - "ed25519-dalek", + "ed25519-dalek 2.2.0", "five8", "rand 
0.8.5", "serde", @@ -10568,20 +10667,31 @@ dependencies = [ [[package]] name = "solana-signer" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c41991508a4b02f021c1342ba00bcfa098630b213726ceadc7cb032e051975b" +checksum = "5bfea97951fee8bae0d6038f39a5efcb6230ecdfe33425ac75196d1a1e3e3235" dependencies = [ "solana-pubkey", "solana-signature", "solana-transaction-error", ] +[[package]] +name = "solana-signer-store" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36329bba208f0e41954389ae4ad5d973fe15952672cfd71a9b49deb7d2ecbc2f" +dependencies = [ + "bitvec", + "num-derive", + "num-traits", +] + [[package]] name = "solana-slot-hashes" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8691982114513763e88d04094c9caa0376b867a29577939011331134c301ce" +checksum = "80a293f952293281443c04f4d96afd9d547721923d596e92b4377ed2360f1746" dependencies = [ "serde", "serde_derive", @@ -10592,9 +10702,9 @@ dependencies = [ [[package]] name = "solana-slot-history" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ccc1b2067ca22754d5283afb2b0126d61eae734fc616d23871b0943b0d935e" +checksum = "f914f6b108f5bba14a280b458d023e3621c9973f27f015a4d755b50e88d89e97" dependencies = [ "bv", "serde", @@ -10605,9 +10715,9 @@ dependencies = [ [[package]] name = "solana-stable-layout" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f14f7d02af8f2bc1b5efeeae71bc1c2b7f0f65cd75bcc7d8180f2c762a57f54" +checksum = "1da74507795b6e8fb60b7c7306c0c36e2c315805d16eaaf479452661234685ac" dependencies = [ "solana-instruction", "solana-pubkey", @@ -10615,12 +10725,13 @@ dependencies = [ [[package]] name = "solana-stake-accounts" -version = "3.0.0" +version = "3.1.0" dependencies = [ "clap 2.33.3", 
"solana-account", "solana-clap-utils", "solana-cli-config", + "solana-cli-output", "solana-client-traits", "solana-clock", "solana-commitment-config", @@ -10630,6 +10741,7 @@ dependencies = [ "solana-keypair", "solana-message", "solana-native-token", + "solana-program-binaries", "solana-pubkey", "solana-remote-wallet", "solana-rpc-client", @@ -10639,97 +10751,57 @@ dependencies = [ "solana-signer", "solana-stake-interface", "solana-stake-program", + "solana-sysvar", "solana-transaction", "solana-version", ] [[package]] name = "solana-stake-interface" -version = "1.2.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5269e89fde216b4d7e1d1739cf5303f8398a1ff372a81232abbee80e554a838c" +checksum = "f6f912ae679b683365348dea482dbd9468d22ff258b554fd36e3d3683c2122e3" dependencies = [ - "borsh 0.10.3", - "borsh 1.5.7", + "borsh", "num-traits", "serde", "serde_derive", "solana-clock", "solana-cpi", - "solana-decode-error", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-instruction", "solana-program-error", "solana-pubkey", "solana-system-interface", + "solana-sysvar", "solana-sysvar-id", ] [[package]] name = "solana-stake-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", - "assert_matches", "bincode", - "criterion", "log", - "proptest", "solana-account", - "solana-bincode", "solana-clock", - "solana-compute-budget", - "solana-config-program-client", - "solana-epoch-rewards", - "solana-epoch-schedule", + "solana-config-interface", "solana-genesis-config", - "solana-instruction", - "solana-log-collector", "solana-native-token", - "solana-packet", - "solana-program-runtime", "solana-pubkey", "solana-rent", "solana-sdk-ids", "solana-stake-interface", - "solana-svm-callback", - "solana-svm-feature-set", "solana-sysvar", - "solana-sysvar-id", "solana-transaction-context", - "solana-type-overrides", "solana-vote-interface", - "solana-vote-program", - "test-case", -] - -[[package]] -name = 
"solana-stake-program-tests" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "assert_matches", - "bincode", - "solana-account", - "solana-instruction", - "solana-keypair", - "solana-program-error", - "solana-program-test", - "solana-pubkey", - "solana-signer", - "solana-stake-interface", - "solana-system-interface", - "solana-sysvar", - "solana-transaction", - "solana-transaction-error", - "solana-vote-program", - "test-case", ] [[package]] name = "solana-storage-bigtable" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-reserved-account-keys", "backoff", @@ -10765,7 +10837,7 @@ dependencies = [ "solana-transaction-context", "solana-transaction-error", "solana-transaction-status", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tonic", "zstd", @@ -10773,7 +10845,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "bs58", @@ -10798,19 +10870,20 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "3.0.0" +version = "3.1.0" dependencies = [ + "anyhow", "arc-swap", "assert_matches", - "async-channel", "bytes", + "clap 4.5.31", "crossbeam-channel", "dashmap", "futures 0.3.31", "futures-util", "governor", "histogram", - "indexmap 2.10.0", + "indexmap 2.11.4", "itertools 0.12.1", "libc", "log", @@ -10821,7 +10894,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.31", + "rustls 0.23.32", "smallvec", "socket2 0.6.0", "solana-keypair", @@ -10840,7 +10913,7 @@ dependencies = [ "solana-tls-utils", "solana-transaction-error", "solana-transaction-metrics-tracker", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-util 0.7.16", "x509-parser", @@ -10848,21 +10921,17 @@ dependencies = [ [[package]] name = "solana-svm" -version = "3.0.0" +version = "3.1.0" dependencies = [ - "agave-feature-set", - "agave-reserved-account-keys", "agave-syscalls", "ahash 0.8.11", "assert_matches", "bincode", - "ed25519-dalek", - "itertools 
0.12.1", + "ed25519-dalek 1.0.1", "libsecp256k1", "log", "openssl", "percentage", - "prost", "qualifier_attr", "rand 0.7.3", "serde", @@ -10871,7 +10940,7 @@ dependencies = [ "solana-account", "solana-bpf-loader-program", "solana-clock", - "solana-compute-budget-instruction", + "solana-compute-budget", "solana-compute-budget-interface", "solana-compute-budget-program", "solana-ed25519-program", @@ -10887,73 +10956,83 @@ dependencies = [ "solana-loader-v3-interface", "solana-loader-v4-interface", "solana-loader-v4-program", - "solana-log-collector", "solana-logger", - "solana-measure", "solana-message", "solana-native-token", "solana-nonce", "solana-nonce-account", "solana-precompile-error", + "solana-program-binaries", "solana-program-entrypoint", "solana-program-pack", "solana-program-runtime", "solana-pubkey", "solana-rent", - "solana-rent-collector", "solana-sbpf", "solana-sdk-ids", "solana-secp256k1-program", "solana-secp256r1-program", "solana-signature", "solana-signer", - "solana-slot-hashes", "solana-svm", "solana-svm-callback", - "solana-svm-conformance", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-timings", "solana-svm-transaction", + "solana-svm-type-overrides", "solana-system-interface", "solana-system-program", "solana-system-transaction", "solana-sysvar", "solana-sysvar-id", - "solana-timings", "solana-transaction", "solana-transaction-context", "solana-transaction-error", - "solana-type-overrides", "spl-generic-token", "spl-token-interface", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-svm-callback" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account", + "solana-clock", "solana-precompile-error", "solana-pubkey", ] [[package]] -name = "solana-svm-conformance" -version = "3.0.0" +name = "solana-svm-feature-set" +version = "3.1.0" + +[[package]] +name = "solana-svm-log-collector" +version = "3.1.0" dependencies = [ - "prost", - "prost-build", - 
"prost-types", + "log", ] [[package]] -name = "solana-svm-feature-set" -version = "3.0.0" +name = "solana-svm-measure" +version = "3.1.0" + +[[package]] +name = "solana-svm-timings" +version = "3.1.0" +dependencies = [ + "eager", + "enum-iterator", + "solana-pubkey", +] [[package]] name = "solana-svm-transaction" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-hash", "solana-message", @@ -10967,25 +11046,33 @@ dependencies = [ "test-case", ] +[[package]] +name = "solana-svm-type-overrides" +version = "3.1.0" +dependencies = [ + "futures 0.3.31", + "rand 0.8.5", + "shuttle", +] + [[package]] name = "solana-system-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7c18cb1a91c6be5f5a8ac9276a1d7c737e39a21beba9ea710ab4b9c63bc90" +checksum = "4e1790547bfc3061f1ee68ea9d8dc6c973c02a163697b24263a8e9f2e6d4afa2" dependencies = [ - "js-sys", "num-traits", "serde", "serde_derive", - "solana-decode-error", "solana-instruction", + "solana-msg", + "solana-program-error", "solana-pubkey", - "wasm-bindgen", ] [[package]] name = "solana-system-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "assert_matches", @@ -11000,7 +11087,6 @@ dependencies = [ "solana-fee-calculator", "solana-hash", "solana-instruction", - "solana-log-collector", "solana-nonce", "solana-nonce-account", "solana-packet", @@ -11011,17 +11097,18 @@ dependencies = [ "solana-sha256-hasher", "solana-svm-callback", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-type-overrides", "solana-system-interface", "solana-sysvar", "solana-transaction-context", - "solana-type-overrides", ] [[package]] name = "solana-system-transaction" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd98a25e5bcba8b6be8bcbb7b84b24c2a6a8178d7fb0e3077a916855ceba91a" +checksum = 
"a31b5699ec533621515e714f1533ee6b3b0e71c463301d919eb59b8c1e249d30" dependencies = [ "solana-hash", "solana-keypair", @@ -11034,9 +11121,9 @@ dependencies = [ [[package]] name = "solana-sysvar" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50c92bc019c590f5e42c61939676e18d14809ed00b2a59695dd5c67ae72c097" +checksum = "63205e68d680bcc315337dec311b616ab32fea0a612db3b883ce4de02e0953f9" dependencies = [ "base64 0.22.1", "bincode", @@ -11047,33 +11134,32 @@ dependencies = [ "serde_derive", "solana-account-info", "solana-clock", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-epoch-rewards", "solana-epoch-schedule", "solana-fee-calculator", + "solana-frozen-abi", + "solana-frozen-abi-macro", "solana-hash", "solana-instruction", - "solana-instructions-sysvar", "solana-last-restart-slot", "solana-program-entrypoint", "solana-program-error", "solana-program-memory", "solana-pubkey", "solana-rent", - "solana-sanitize", "solana-sdk-ids", "solana-sdk-macro", "solana-slot-hashes", "solana-slot-history", - "solana-stake-interface", "solana-sysvar-id", ] [[package]] name = "solana-sysvar-id" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5762b273d3325b047cfda250787f8d796d781746860d5d0a746ee29f3e8812c1" +checksum = "5051bc1a16d5d96a96bc33b5b2ec707495c48fe978097bdaba68d3c47987eb32" dependencies = [ "solana-pubkey", "solana-sdk-ids", @@ -11081,7 +11167,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "base64 0.22.1", @@ -11112,6 +11198,7 @@ dependencies = [ "solana-message", "solana-native-token", "solana-net-utils", + "solana-program-binaries", "solana-program-test", "solana-pubkey", "solana-rent", @@ -11125,29 +11212,20 @@ dependencies = [ "solana-tpu-client", "solana-transaction", "solana-validator-exit", - "tokio", -] - -[[package]] 
-name = "solana-time-utils" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af261afb0e8c39252a04d026e3ea9c405342b08c871a2ad8aa5448e068c784c" - -[[package]] -name = "solana-timings" -version = "3.0.0" -dependencies = [ - "eager", - "enum-iterator", - "solana-pubkey", + "tokio", ] [[package]] -name = "solana-tls-utils" +name = "solana-time-utils" version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ced92c60aa76ec4780a9d93f3bd64dfa916e1b998eacc6f1c110f3f444f02c9" + +[[package]] +name = "solana-tls-utils" +version = "3.1.0" dependencies = [ - "rustls 0.23.31", + "rustls 0.23.32", "solana-keypair", "solana-pubkey", "solana-signer", @@ -11156,16 +11234,16 @@ dependencies = [ [[package]] name = "solana-tokens" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "bincode", "chrono", "clap 2.33.3", - "console 0.16.0", + "console 0.16.1", "csv", "ctrlc", - "indexmap 2.10.0", + "indexmap 2.11.4", "indicatif 0.18.0", "pickledb", "serde", @@ -11173,6 +11251,7 @@ dependencies = [ "solana-account-decoder", "solana-clap-utils", "solana-cli-config", + "solana-cli-output", "solana-clock", "solana-commitment-config", "solana-hash", @@ -11200,12 +11279,12 @@ dependencies = [ "spl-associated-token-account-interface", "spl-token-interface", "tempfile", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-tps-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "solana-account", @@ -11218,6 +11297,7 @@ dependencies = [ "solana-hash", "solana-keypair", "solana-message", + "solana-net-utils", "solana-pubkey", "solana-quic-client", "solana-rpc-client", @@ -11231,17 +11311,17 @@ dependencies = [ "solana-transaction-error", "solana-transaction-status", "tempfile", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-tpu-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "bincode", 
"futures-util", - "indexmap 2.10.0", + "indexmap 2.11.4", "indicatif 0.18.0", "log", "rayon", @@ -11262,21 +11342,22 @@ dependencies = [ "solana-signer", "solana-transaction", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-tpu-client-next" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "crossbeam-channel", "futures 0.3.31", "log", "lru", + "qualifier_attr", "quinn", - "rustls 0.23.31", + "rustls 0.23.32", "solana-cli-config", "solana-clock", "solana-commitment-config", @@ -11293,49 +11374,47 @@ dependencies = [ "solana-time-utils", "solana-tls-utils", "solana-tpu-client", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-util 0.7.16", + "tracing", ] [[package]] name = "solana-transaction" -version = "2.2.3" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80657d6088f721148f5d889c828ca60c7daeedac9a8679f9ec215e0c42bcbf41" +checksum = "64928e6af3058dcddd6da6680cbe08324b4e071ad73115738235bbaa9e9f72a5" dependencies = [ "bincode", "serde", "serde_derive", - "solana-bincode", - "solana-feature-set", + "solana-address", "solana-hash", "solana-instruction", - "solana-keypair", + "solana-instruction-error", "solana-message", - "solana-precompiles", - "solana-pubkey", "solana-sanitize", "solana-sdk-ids", "solana-short-vec", "solana-signature", "solana-signer", - "solana-system-interface", "solana-transaction-error", - "wasm-bindgen", ] [[package]] name = "solana-transaction-context" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", + "qualifier_attr", "serde", "serde_derive", "solana-account", "solana-account-info", "solana-instruction", "solana-instructions-sysvar", + "solana-program-entrypoint", "solana-pubkey", "solana-rent", "solana-sbpf", @@ -11348,7 +11427,7 @@ dependencies = [ [[package]] name = "solana-transaction-dos" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "clap 2.33.3", @@ -11385,21 
+11464,21 @@ dependencies = [ [[package]] name = "solana-transaction-error" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a9dc8fdb61c6088baab34fc3a8b8473a03a7a5fd404ed8dd502fa79b67cb1" +checksum = "4222065402340d7e6aec9dc3e54d22992ddcf923d91edcd815443c2bfca3144a" dependencies = [ "serde", "serde_derive", "solana-frozen-abi", "solana-frozen-abi-macro", - "solana-instruction", + "solana-instruction-error", "solana-sanitize", ] [[package]] name = "solana-transaction-metrics-tracker" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "bincode", @@ -11417,14 +11496,14 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "3.0.0" +version = "3.1.0" dependencies = [ "Inflector", "agave-reserved-account-keys", "base64 0.22.1", "bencher", "bincode", - "borsh 1.5.7", + "borsh", "bs58", "bytemuck", "log", @@ -11457,12 +11536,12 @@ dependencies = [ "spl-token-group-interface", "spl-token-interface", "spl-token-metadata-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-transaction-status-client-types" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "bincode", @@ -11481,12 +11560,12 @@ dependencies = [ "solana-transaction-context", "solana-transaction-error", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-turbine" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-xdp", @@ -11496,6 +11575,7 @@ dependencies = [ "bs58", "bytes", "caps", + "conditional-mod", "crossbeam-channel", "futures 0.3.31", "itertools 0.12.1", @@ -11506,7 +11586,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.31", + "rustls 0.23.32", "solana-clock", "solana-cluster-type", "solana-entry", @@ -11520,6 +11600,7 @@ dependencies = [ "solana-metrics", "solana-native-token", "solana-net-utils", + "solana-nohash-hasher", "solana-perf", 
"solana-poh", "solana-pubkey", @@ -11536,24 +11617,16 @@ dependencies = [ "solana-tls-utils", "solana-transaction", "solana-transaction-error", + "solana-turbine", "static_assertions", "test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] -[[package]] -name = "solana-type-overrides" -version = "3.0.0" -dependencies = [ - "futures 0.3.31", - "rand 0.8.5", - "shuttle", -] - [[package]] name = "solana-udp-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "solana-connection-cache", @@ -11562,13 +11635,13 @@ dependencies = [ "solana-packet", "solana-streamer", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-unified-scheduler-logic" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "solana-instruction", @@ -11582,7 +11655,7 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-pool" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-banking-stage-ingress-types", "aquamarine", @@ -11608,8 +11681,8 @@ dependencies = [ "solana-runtime", "solana-runtime-transaction", "solana-svm", + "solana-svm-timings", "solana-system-transaction", - "solana-timings", "solana-transaction", "solana-transaction-error", "solana-unified-scheduler-logic", @@ -11620,27 +11693,19 @@ dependencies = [ "vec_extract_if_polyfill", ] -[[package]] -name = "solana-upload-perf" -version = "3.0.0" -dependencies = [ - "serde_json", - "solana-metrics", -] - [[package]] name = "solana-validator-exit" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bbf6d7a3c0b28dd5335c52c0e9eae49d0ae489a8f324917faf0ded65a812c1d" +checksum = "c5d2face763df5afeaa9509b9019968860e69cc1531ec8b4a2e6c7b702204d5a" [[package]] name = "solana-version" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "rand 0.8.5", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_derive", "solana-frozen-abi", 
@@ -11651,11 +11716,10 @@ dependencies = [ [[package]] name = "solana-vortexor" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-banking-stage-ingress-types", "assert_matches", - "async-channel", "bytes", "clap 4.5.31", "crossbeam-channel", @@ -11664,7 +11728,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.10.0", + "indexmap 2.11.4", "itertools 0.12.1", "libc", "log", @@ -11674,7 +11738,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.31", + "rustls 0.23.32", "signal-hook", "smallvec", "socket2 0.6.0", @@ -11697,15 +11761,16 @@ dependencies = [ "solana-streamer", "solana-transaction-metrics-tracker", "solana-version", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", - "url 2.5.4", + "tokio-util 0.7.16", + "url 2.5.7", "x509-parser", ] [[package]] name = "solana-vote" -version = "3.0.0" +version = "3.1.0" dependencies = [ "arbitrary", "bencher", @@ -11735,27 +11800,29 @@ dependencies = [ "solana-transaction", "solana-vote-interface", "static_assertions", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-vote-interface" -version = "2.2.6" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b80d57478d6599d30acc31cc5ae7f93ec2361a06aefe8ea79bc81739a08af4c3" +checksum = "c33f1a30b1e61944e52afef0992a2be93720c5770eaf1f6d8e6e34f87d90e754" dependencies = [ "arbitrary", "bincode", + "cfg_eval", "num-derive", "num-traits", "serde", "serde_derive", + "serde_with", "solana-clock", - "solana-decode-error", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-hash", "solana-instruction", + "solana-instruction-error", "solana-pubkey", "solana-rent", "solana-sdk-ids", @@ -11767,7 +11834,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "assert_matches", @@ -11802,12 +11869,25 @@ dependencies = [ "solana-transaction-context", "solana-vote-interface", 
"test-case", - "thiserror 2.0.12", + "thiserror 2.0.16", +] + +[[package]] +name = "solana-votor-messages" +version = "3.1.0" +dependencies = [ + "serde", + "solana-bls-signatures", + "solana-clock", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-hash", + "solana-logger", ] [[package]] name = "solana-wen-restart" -version = "3.0.0" +version = "3.1.0" dependencies = [ "anyhow", "assert_matches", @@ -11833,8 +11913,8 @@ dependencies = [ "solana-shred-version", "solana-signer", "solana-streamer", + "solana-svm-timings", "solana-time-utils", - "solana-timings", "solana-vote", "solana-vote-interface", "solana-vote-program", @@ -11843,7 +11923,7 @@ dependencies = [ [[package]] name = "solana-zk-elgamal-proof-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bytemuck", @@ -11852,15 +11932,15 @@ dependencies = [ "num-derive", "num-traits", "solana-instruction", - "solana-log-collector", "solana-program-runtime", "solana-sdk-ids", - "solana-zk-sdk 3.0.0", + "solana-svm-log-collector", + "solana-zk-sdk", ] [[package]] name = "solana-zk-elgamal-proof-program-tests" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bytemuck", "solana-account", @@ -11874,69 +11954,14 @@ dependencies = [ "solana-system-interface", "solana-transaction", "solana-transaction-error", - "solana-zk-sdk 3.0.0", -] - -[[package]] -name = "solana-zk-keygen" -version = "3.0.0" -dependencies = [ - "bs58", - "clap 3.2.23", - "dirs-next", - "solana-clap-v3-utils", - "solana-pubkey", - "solana-remote-wallet", - "solana-seed-derivable", - "solana-signer", - "solana-version", - "solana-zk-token-sdk", - "tempfile", - "thiserror 2.0.12", - "tiny-bip39", -] - -[[package]] -name = "solana-zk-sdk" -version = "2.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05857892ac50fe03c125d8445fd790c6768015b76f4ad1e4b4b1499938b357f0" -dependencies = [ - "aes-gcm-siv", - "base64 0.22.1", - "bincode", - "bytemuck", - "bytemuck_derive", - 
"curve25519-dalek 4.1.3", - "itertools 0.12.1", - "js-sys", - "merlin", - "num-derive", - "num-traits", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "sha3", - "solana-derivation-path", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-seed-derivable", - "solana-seed-phrase", - "solana-signature", - "solana-signer", - "subtle", - "thiserror 2.0.12", - "wasm-bindgen", - "zeroize", + "solana-zk-sdk", ] [[package]] name = "solana-zk-sdk" -version = "3.0.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dffbd0b7537f4249d69b74c632f8eac1d2726572022791f9ead65a67d3f6905" +checksum = "9602bcb1f7af15caef92b91132ec2347e1c51a72ecdbefdaefa3eac4b8711475" dependencies = [ "aes-gcm-siv", "base64 0.22.1", @@ -11944,6 +11969,7 @@ dependencies = [ "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", + "getrandom 0.2.15", "itertools 0.12.1", "js-sys", "merlin", @@ -11963,14 +11989,14 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.12", + "thiserror 2.0.16", "wasm-bindgen", "zeroize", ] [[package]] name = "solana-zk-token-proof-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bytemuck", @@ -11979,15 +12005,15 @@ dependencies = [ "num-derive", "num-traits", "solana-instruction", - "solana-log-collector", "solana-program-runtime", "solana-sdk-ids", + "solana-svm-log-collector", "solana-zk-token-sdk", ] [[package]] name = "solana-zk-token-sdk" -version = "3.0.0" +version = "3.1.0" dependencies = [ "aes-gcm-siv", "base64 0.22.1", @@ -12004,7 +12030,7 @@ dependencies = [ "serde_derive", "serde_json", "sha3", - "solana-curve25519 3.0.0", + "solana-curve25519 3.1.0", "solana-derivation-path", "solana-instruction", "solana-keypair", @@ -12015,7 +12041,7 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.12", + "thiserror 2.0.16", "tiny-bip39", "zeroize", ] @@ -12035,22 +12061,32 @@ dependencies = 
[ "lock_api", ] +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "spl-associated-token-account-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6bbe0794e532ac08428d3abf5bf8ae75bd81dfddd785c388e326c00c92c6f5" +checksum = "e6433917b60441d68d99a17e121d9db0ea15a9a69c0e5afa34649cf5ba12612f" dependencies = [ - "borsh 1.5.7", + "borsh", "solana-instruction", "solana-pubkey", ] [[package]] name = "spl-discriminator" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a20542d4c8264856d205c0090512f374dbf7b3124479a3d93ab6184ae3631aa" +checksum = "d48cc11459e265d5b501534144266620289720b4c44522a47bc6b63cd295d2f3" dependencies = [ "bytemuck", "solana-program-error", @@ -12066,7 +12102,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -12078,15 +12114,15 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.9", - "syn 2.0.104", + "syn 2.0.106", "thiserror 1.0.69", ] [[package]] name = "spl-generic-token" -version = "1.0.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "741a62a566d97c58d33f9ed32337ceedd4e35109a686e31b1866c5dfa56abddc" +checksum = "233df81b75ab99b42f002b5cdd6e65a7505ffa930624f7096a7580a56765e9cf" dependencies = [ "bytemuck", "solana-pubkey", @@ -12094,9 +12130,9 @@ dependencies = [ [[package]] name = "spl-instruction-padding-interface" -version = "0.1.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f738b75144edbb32c01de832632eecad71113b62a48ef8e55e60c5a692bae4e" +checksum = 
"9c3a77c0c9b83b111ee29bc6aa6eaab54b82e1ed5db40ba9786527b80283c3ef" dependencies = [ "num_enum", "solana-instruction", @@ -12106,9 +12142,9 @@ dependencies = [ [[package]] name = "spl-memo-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24af0730130fea732616be9425fe8eb77782e2aab2f0e76837b6a66aaba96c6b" +checksum = "3d4e2aedd58f858337fa609af5ad7100d4a243fdaf6a40d6eb4c28c5f19505d3" dependencies = [ "solana-instruction", "solana-pubkey", @@ -12116,29 +12152,28 @@ dependencies = [ [[package]] name = "spl-pod" -version = "0.5.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d994afaf86b779104b4a95ba9ca75b8ced3fdb17ee934e38cb69e72afbe17799" +checksum = "b1233fdecd7461611d69bb87bc2e95af742df47291975d21232a0be8217da9de" dependencies = [ - "borsh 1.5.7", + "borsh", "bytemuck", "bytemuck_derive", "num-derive", "num-traits", - "solana-decode-error", - "solana-msg", + "num_enum", "solana-program-error", "solana-program-option", "solana-pubkey", - "solana-zk-sdk 2.3.6", - "thiserror 2.0.12", + "solana-zk-sdk", + "thiserror 2.0.16", ] [[package]] name = "spl-token-2022-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d7ae2ee6b856f8ddcbdc3b3a9f4d2141582bbe150f93e5298ee97e0251fa04" +checksum = "0888304af6b3d839e435712e6c84025e09513017425ff62045b6b8c41feb77d9" dependencies = [ "arrayref", "bytemuck", @@ -12146,79 +12181,76 @@ dependencies = [ "num-traits", "num_enum", "solana-account-info", - "solana-decode-error", "solana-instruction", - "solana-msg", "solana-program-error", "solana-program-option", "solana-program-pack", "solana-pubkey", "solana-sdk-ids", - "solana-zk-sdk 2.3.6", + "solana-zk-sdk", "spl-pod", "spl-token-confidential-transfer-proof-extraction", "spl-token-confidential-transfer-proof-generation", "spl-token-group-interface", "spl-token-metadata-interface", 
"spl-type-length-value", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-token-confidential-transfer-proof-extraction" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bedc4675c80409a004da46978674e4073c65c4b1c611bf33d120381edeffe036" +checksum = "7a22217af69b7a61ca813f47c018afb0b00b02a74a4c70ff099cd4287740bc3d" dependencies = [ "bytemuck", "solana-account-info", - "solana-curve25519 2.2.15", + "solana-curve25519 2.3.7", "solana-instruction", "solana-instructions-sysvar", "solana-msg", "solana-program-error", "solana-pubkey", "solana-sdk-ids", - "solana-zk-sdk 2.3.6", + "solana-zk-sdk", "spl-pod", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-token-confidential-transfer-proof-generation" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae5b124840d4aed474cef101d946a798b806b46a509ee4df91021e1ab1cef3ef" +checksum = "f63a2b41095945dc15274b924b21ccae9b3ec9dc2fdd43dbc08de8c33bbcd915" dependencies = [ "curve25519-dalek 4.1.3", - "solana-zk-sdk 2.3.6", - "thiserror 2.0.12", + "solana-zk-sdk", + "thiserror 2.0.16", ] [[package]] name = "spl-token-group-interface" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5597b4cd76f85ce7cd206045b7dc22da8c25516573d42d267c8d1fd128db5129" +checksum = "452d0f758af20caaa10d9a6f7608232e000d4c74462f248540b3d2ddfa419776" dependencies = [ "bytemuck", "num-derive", "num-traits", - "solana-decode-error", + "num_enum", "solana-instruction", - "solana-msg", "solana-program-error", "solana-pubkey", "spl-discriminator", "spl-pod", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-token-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e0c2d4e38ef5834cf7fb1b592b8a8c6eab8485f5ac7a04a151b502c63a0aaa" +checksum = 
"8c564ac05a7c8d8b12e988a37d82695b5ba4db376d07ea98bc4882c81f96c7f3" dependencies = [ "arrayref", "bytemuck", @@ -12231,46 +12263,44 @@ dependencies = [ "solana-program-pack", "solana-pubkey", "solana-sdk-ids", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-token-metadata-interface" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "304d6e06f0de0c13a621464b1fd5d4b1bebf60d15ca71a44d3839958e0da16ee" +checksum = "9c467c7c3bd056f8fe60119e7ec34ddd6f23052c2fa8f1f51999098063b72676" dependencies = [ - "borsh 1.5.7", + "borsh", "num-derive", "num-traits", "solana-borsh", - "solana-decode-error", "solana-instruction", - "solana-msg", "solana-program-error", "solana-pubkey", "spl-discriminator", "spl-pod", "spl-type-length-value", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-type-length-value" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d417eb548214fa822d93f84444024b4e57c13ed6719d4dcc68eec24fb481e9f5" +checksum = "ca20a1a19f4507a98ca4b28ff5ed54cac9b9d34ed27863e2bde50a3238f9a6ac" dependencies = [ "bytemuck", "num-derive", "num-traits", + "num_enum", "solana-account-info", - "solana-decode-error", "solana-msg", "solana-program-error", "spl-discriminator", "spl-pod", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] @@ -12361,9 +12391,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.104" +version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", @@ -12405,7 +12435,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -12520,15 +12550,15 
@@ dependencies = [ [[package]] name = "tempfile" -version = "3.20.0" +version = "3.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "84fa4d11fadde498443cca10fd3ac23c951f0dc59e080e9f4b93d4df4e4eea53" dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", "rustix 1.0.2", - "windows-sys 0.59.0", + "windows-sys 0.61.0", ] [[package]] @@ -12561,11 +12591,11 @@ version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54c25e2cb8f5fcd7318157634e8838aa6f7e4715c96637f969fabaccd1ef5462" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -12577,7 +12607,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", "test-case-core", ] @@ -12607,11 +12637,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.16", ] [[package]] @@ -12622,18 +12652,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -12642,8 +12672,8 @@ version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe075d7053dae61ac5413a34ea7d4913b6e6207844fd726bdd858b37ff72bf5" dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.1", + "bitflags 2.9.4", + "cfg-if 1.0.3", "libc", "log", "rustversion", @@ -12665,6 +12695,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "tikv-jemalloc-sys" version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" @@ -12795,7 +12834,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -12824,7 +12863,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.31", + "rustls 0.23.32", "tokio", ] @@ -12935,7 +12974,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.10.0", + "indexmap 2.11.4", "toml_datetime", "winnow 0.5.16", ] @@ -12946,7 +12985,7 @@ version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - "indexmap 2.10.0", + "indexmap 2.11.4", "serde", "serde_spanned", "toml_datetime", @@ -12971,7 +13010,7 @@ dependencies = [ "http-body 0.4.5", "hyper 0.14.32", "hyper-timeout", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project", "prost", "rustls-pemfile", @@ -13039,7 +13078,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "bytes", "futures-util", "http 1.1.0", @@ -13065,9 +13104,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -13077,20 +13116,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -13110,9 +13149,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.7" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5312f325fe3588e277415f5a6cca1f4ccad0f248c4cd5a4bd33032d7286abc22" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "sharded-slab", "thread_local", @@ -13158,7 +13197,7 @@ dependencies = [ "rustls 0.21.12", "sha1", "thiserror 1.0.69", - "url 2.5.4", + "url 2.5.7", "utf-8", "webpki-roots 0.24.0", ] @@ -13301,13 +13340,14 @@ dependencies = [ [[package]] name = "url" -version = "2.5.4" 
+version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", - "idna 1.0.3", - "percent-encoding 2.3.1", + "idna 1.1.0", + "percent-encoding 2.3.2", + "serde", ] [[package]] @@ -13322,12 +13362,6 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" -[[package]] -name = "utf8-width" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cf7d77f457ef8dfa11e4cd5933c5ddb5dc52a94664071951219a97710f0a32b" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -13429,27 +13463,28 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "ab10a69fbd0a177f5f649ad4d8d3305499c42bab9aef2f7ff592d0ec8f833819" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "once_cell", "rustversion", "wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "0bb702423545a6007bbc368fde243ba47ca275e549c8a28617f56f6ba53b1d1c" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", "wasm-bindgen-shared", ] @@ -13459,7 +13494,7 @@ version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "js-sys", "wasm-bindgen", "web-sys", 
@@ -13467,9 +13502,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "fc65f4f411d91494355917b605e1480033152658d71f722a90647f56a70c88a0" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -13477,22 +13512,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "ffc003a991398a8ee604a401e194b6b3a39677b3173d6e74495eb51b82e99a32" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "293c37f4efa430ca14db3721dfbe48d8c33308096bd44d80ebaa775ab71ba1cf" dependencies = [ "unicode-ident", ] @@ -13519,9 +13554,9 @@ dependencies = [ [[package]] name = "webpki-root-certs" -version = "0.26.6" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c6dfa3ac045bc517de14c7b1384298de1dbd229d38e08e169d9ae8c170937c" +checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" dependencies = [ "rustls-pki-types", ] @@ -13570,16 +13605,6 @@ dependencies = [ "libc", ] -[[package]] -name = "wide" -version = "0.7.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" -dependencies = [ - "bytemuck", - "safe_arch", -] - [[package]] name = "winapi" version = "0.2.8" @@ -13645,9 +13670,9 @@ dependencies = [ 
[[package]] name = "windows-link" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" [[package]] name = "windows-result" @@ -13696,11 +13721,11 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.60.2" +version = "0.61.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" dependencies = [ - "windows-targets 0.53.2", + "windows-link", ] [[package]] @@ -13742,29 +13767,13 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", + "windows_i686_gnullvm", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] -[[package]] -name = "windows-targets" -version = "0.53.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" -dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", -] - [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" @@ -13783,12 +13792,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -13807,12 +13810,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -13831,24 +13828,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -13867,12 +13852,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -13891,12 +13870,6 @@ version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" - [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" @@ -13915,12 +13888,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -13939,12 +13906,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" - [[package]] name = "winnow" version = "0.5.16" @@ -13969,7 +13930,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.1", + "cfg-if 1.0.3", "windows-sys 0.48.0", ] @@ -13979,7 +13940,7 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", ] [[package]] @@ -14067,7 +14028,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 
2.0.106", "synstructure 0.13.1", ] @@ -14097,7 +14058,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -14108,7 +14069,7 @@ checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -14128,7 +14089,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", "synstructure 0.13.1", ] @@ -14149,7 +14110,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] @@ -14171,7 +14132,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.104", + "syn 2.0.106", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 9079b324e1e4a0..4ae6690152f2e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,13 +50,11 @@ members = [ "ledger", "ledger-tool", "local-cluster", - "log-analyzer", - "log-collector", + "low-pass-filter", "measure", "memory-management", "merkle-tree", "metrics", - "net-shaper", "net-utils", "notifier", "perf", @@ -67,6 +65,7 @@ members = [ "poh-bench", "poseidon", "precompiles", + "program-binaries", "program-runtime", "program-test", "programs/bpf-loader-tests", @@ -76,7 +75,6 @@ members = [ "programs/ed25519-tests", "programs/loader-v4", "programs/stake", - "programs/stake-tests", "programs/system", "programs/vote", "programs/zk-elgamal-proof", @@ -96,6 +94,7 @@ members = [ "rpc-test", "runtime", "runtime-transaction", + "scheduler-bindings", "send-transaction-service", "stake-accounts", "storage-bigtable", @@ -104,14 +103,16 @@ members = [ "streamer", "svm", "svm-callback", - "svm-conformance", "svm-feature-set", + "svm-log-collector", + 
"svm-measure", + "svm-timings", "svm-transaction", + "svm-type-overrides", "syscalls", "syscalls/gen-syscall-list", "test-validator", "thread-manager", - "timings", "tls-utils", "tokens", "tps-client", @@ -124,30 +125,30 @@ members = [ "transaction-status-client-types", "transaction-view", "turbine", - "type-overrides", "udp-client", "unified-scheduler-logic", "unified-scheduler-pool", - "upload-perf", "validator", "verified-packet-receiver", "version", "vortexor", "vote", + "votor", + "votor-messages", "watchtower", "wen-restart", "xdp", - "zk-keygen", "zk-token-sdk", ] -exclude = ["programs/sbf", "svm/examples", "svm/tests/example-programs"] +exclude = ["programs/sbf", "svm/tests/example-programs"] resolver = "2" [workspace.package] -version = "3.0.0" +version = "3.1.0" authors = ["Anza Maintainers "] +description = "Blockchain, Rebuilt for Scale" repository = "https://github.com/anza-xyz/agave" homepage = "https://anza.xyz/" license = "Apache-2.0" @@ -173,22 +174,25 @@ used_underscore_binding = "deny" [workspace.dependencies] Inflector = "0.11.4" aes-gcm-siv = "0.11.1" -agave-banking-stage-ingress-types = { path = "banking-stage-ingress-types", version = "=3.0.0" } -agave-cargo-registry = { path = "cargo-registry", version = "=3.0.0" } -agave-feature-set = { path = "feature-set", version = "=3.0.0" } -agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=3.0.0" } -agave-io-uring = { path = "io-uring", version = "=3.0.0" } -agave-precompiles = { path = "precompiles", version = "=3.0.0" } -agave-reserved-account-keys = { path = "reserved-account-keys", version = "=3.0.0" } -agave-syscalls = { path = "syscalls", version = "=3.0.0" } -agave-thread-manager = { path = "thread-manager", version = "=3.0.0" } -agave-transaction-view = { path = "transaction-view", version = "=3.0.0" } -agave-verified-packet-receiver = { path = "verified-packet-receiver", version = "=3.0.0" } -agave-xdp = { path = "xdp", version = "=3.0.0" } 
+agave-banking-stage-ingress-types = { path = "banking-stage-ingress-types", version = "=3.1.0" } +agave-cargo-registry = { path = "cargo-registry", version = "=3.1.0" } +agave-feature-set = { path = "feature-set", version = "=3.1.0" } +agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=3.1.0" } +agave-io-uring = { path = "io-uring", version = "=3.1.0" } +agave-low-pass-filter = { path = "low-pass-filter", version = "=3.1.0" } +agave-precompiles = { path = "precompiles", version = "=3.1.0" } +agave-reserved-account-keys = { path = "reserved-account-keys", version = "=3.1.0" } +agave-scheduler-bindings = { path = "scheduler-bindings", version = "=3.1.0" } +agave-syscalls = { path = "syscalls", version = "=3.1.0" } +agave-thread-manager = { path = "thread-manager", version = "=3.1.0" } +agave-transaction-view = { path = "transaction-view", version = "=3.1.0" } +agave-verified-packet-receiver = { path = "verified-packet-receiver", version = "=3.1.0" } +agave-votor = { path = "votor", version = "=3.1.0" } +agave-xdp = { path = "xdp", version = "=3.1.0" } ahash = "0.8.11" -anyhow = "1.0.98" +anyhow = "1.0.100" aquamarine = "0.6.0" -arbitrary = "1.4.1" +arbitrary = "1.4.2" arc-swap = "1.7.1" ark-bn254 = "0.4.0" ark-ec = "0.4.0" @@ -199,9 +203,8 @@ arrayref = "0.3.9" arrayvec = "0.7.6" assert_cmd = "2.0" assert_matches = "1.5.0" -async-channel = "1.9.0" async-lock = "3.4.1" -async-trait = "0.1.88" +async-trait = "0.1.89" atty = "0.2.11" axum = "0.7.9" aya = "0.13" @@ -209,27 +212,27 @@ backoff = "0.4.0" base64 = "0.22.1" bencher = "0.1.5" bincode = "1.3.3" -bitflags = { version = "2.9.1" } +bitflags = { version = "2.9.4" } +bitvec = { version = "1.0.1", features = ["serde"] } blake3 = "1.8.2" borsh = { version = "1.5.7", features = ["derive", "unstable__schema"] } -borsh0-10 = { package = "borsh", version = "0.10.3" } bs58 = { version = "0.5.1", default-features = false } bv = "0.11.1" byte-unit = "4.0.19" -bytemuck = "1.23.1" -bytemuck_derive 
= "1.10.0" +bytemuck = "1.23.2" +bytemuck_derive = "1.10.1" bytes = "1.10" bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" -cfg-if = "1.0.1" +cfg-if = "1.0.3" cfg_eval = "0.1.2" -chrono = { version = "0.4.41", default-features = false } +chrono = { version = "0.4.42", default-features = false } chrono-humanize = "0.2.3" clap = "2.33.1" # Remove this dependency when procedural macros will support non-inline modules. conditional-mod = "0.1.0" -console = "0.16.0" +console = "0.16.1" console_error_panic_hook = "0.1.7" console_log = "0.2.2" const_format = "0.2.34" @@ -238,11 +241,11 @@ criterion = "0.5.1" criterion-stats = "0.3.0" crossbeam-channel = "0.5.15" csv = "1.3.1" -ctrlc = "3.4.7" +ctrlc = "3.5.0" curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core"] } dashmap = "5.5.3" derivation-path = { version = "0.2.0", default-features = false } -derive-where = "1.5.0" +derive-where = "1.6.0" derive_more = { version = "1.0.0", features = ["full"] } dialoguer = "0.10.4" digest = "0.10.7" @@ -255,7 +258,6 @@ ed25519-dalek = "=1.0.1" ed25519-dalek-bip32 = "0.2.0" enum-iterator = "1.5.0" env_logger = "0.11.8" -etcd-client = "0.11.1" fast-math = "0.1" fd-lock = "3.0.13" five8_const = "0.1.4" @@ -274,18 +276,18 @@ hidapi = { version = "2.6.3", default-features = false } histogram = "0.6.9" hmac = "0.12.1" http = "0.2.12" -humantime = "2.2.0" +humantime = "2.3.0" hyper = "0.14.32" hyper-proxy = "0.9.1" im = "15.1.0" -indexmap = "2.10.0" +indexmap = "2.11.4" indicatif = "0.18.0" -io-uring = "0.7.9" +io-uring = "0.7.10" itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.6.0", features = [ "unprefixed_malloc_on_supported_platforms", ] } -js-sys = "0.3.77" +js-sys = "0.3.80" json5 = "0.4.1" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" @@ -294,17 +296,17 @@ jsonrpc-http-server = "18.0.0" jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" lazy-lru = "0.1.3" -libc = "0.2.174" +libc = "0.2.175" libloading = 
"0.7.4" libsecp256k1 = { version = "0.6.0", default-features = false, features = [ "std", "static-context", ] } light-poseidon = "0.2.0" -log = "0.4.27" +log = "0.4.28" lru = "0.7.7" lz4 = "1.28.1" -memmap2 = "0.9.7" +memmap2 = "0.9.8" memoffset = "0.9" merlin = { version = "3", default-features = false } min-max-heap = "1.3.0" @@ -326,38 +328,38 @@ predicates = "2.1" pretty-hex = "0.3.0" pretty_assertions = "1.4.1" prio-graph = "0.3.0" -proc-macro2 = "1.0.95" -proptest = "1.7" +proc-macro2 = "1.0.97" +proptest = "1.8" prost = "0.11.9" prost-build = "0.11.9" prost-types = "0.11.9" protobuf-src = "1.1.0" qstring = "0.7.2" qualifier_attr = { version = "0.2.2", default-features = false } -quinn = "0.11.8" -quinn-proto = "0.11.12" +quinn = "0.11.9" +quinn-proto = "0.11.13" quote = "1.0" rand = "0.8.5" rand0-7 = { package = "rand", version = "0.7" } rand_chacha = "0.3.1" rand_chacha0-2 = { package = "rand_chacha", version = "0.2.2" } -rayon = "1.10.0" +rayon = "1.11.0" reed-solomon-erasure = "6.0.0" -regex = "1.11.1" -reqwest = { version = "0.12.22", default-features = false } +regex = "1.11.2" +reqwest = { version = "0.12.23", default-features = false } reqwest-middleware = "0.4.2" rolling-file = "0.2.0" rpassword = "7.4" -rustls = { version = "0.23.31", features = ["std"], default-features = false } +rustls = { version = "0.23.32", features = ["std"], default-features = false } scopeguard = "1.2.0" -semver = "1.0.26" +semver = "1.0.27" seqlock = "0.2.0" -serde = "1.0.219" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde = "1.0.226" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde-big-array = "0.5.1" -serde_bytes = "0.11.17" -serde_derive = "1.0.219" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 -serde_json = "1.0.142" -serde_with = { version = "3.14.0", default-features 
= false } +serde_bytes = "0.11.19" +serde_derive = "1.0.224" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_json = "1.0.145" +serde_with = { version = "3.14.1", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" sha2 = "0.10.9" @@ -365,210 +367,212 @@ sha3 = "0.10.8" shuttle = "0.7.1" signal-hook = "0.3.18" siphasher = "1.0.1" -slab = "0.4.10" -smallvec = "1.15.1" +slab = "0.4.11" +smallvec = { version = "1.15.1", default-features = false, features = ["union"] } smpl_jwt = "0.7.1" socket2 = "0.6.0" soketto = "0.7" -solana-account = "2.2.1" -solana-account-decoder = { path = "account-decoder", version = "=3.0.0" } -solana-account-decoder-client-types = { path = "account-decoder-client-types", version = "=3.0.0" } -solana-account-info = "2.3.0" -solana-accounts-db = { path = "accounts-db", version = "=3.0.0" } -solana-address-lookup-table-interface = "2.2.2" -solana-atomic-u64 = "2.2.1" -solana-banks-client = { path = "banks-client", version = "=3.0.0" } -solana-banks-interface = { path = "banks-interface", version = "=3.0.0" } -solana-banks-server = { path = "banks-server", version = "=3.0.0" } -solana-bench-tps = { path = "bench-tps", version = "=3.0.0" } -solana-big-mod-exp = "2.2.1" -solana-bincode = "2.2.1" -solana-blake3-hasher = "2.2.1" -solana-bloom = { path = "bloom", version = "=3.0.0" } -solana-bn254 = "2.2.2" -solana-borsh = "2.2.1" -solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=3.0.0" } -solana-bucket-map = { path = "bucket_map", version = "=3.0.0" } -solana-builtins = { path = "builtins", version = "=3.0.0" } -solana-builtins-default-costs = { path = "builtins-default-costs", version = "=3.0.0" } -solana-clap-utils = { path = "clap-utils", version = "=3.0.0" } -solana-clap-v3-utils = { path = "clap-v3-utils", version = "=3.0.0" } -solana-cli = { path = "cli", version = "=3.0.0" } -solana-cli-config = { path = "cli-config", version = "=3.0.0" } 
-solana-cli-output = { path = "cli-output", version = "=3.0.0" } -solana-client = { path = "client", version = "=3.0.0" } -solana-client-traits = "2.2.1" -solana-clock = "2.2.2" -solana-cluster-type = "2.2.1" -solana-commitment-config = "2.2.1" -solana-compute-budget = { path = "compute-budget", version = "=3.0.0" } -solana-compute-budget-instruction = { path = "compute-budget-instruction", version = "=3.0.0" } -solana-compute-budget-interface = "2.2.2" -solana-compute-budget-program = { path = "programs/compute-budget", version = "=3.0.0" } -solana-config-interface = "1.0.0" -solana-config-program-client = "1.1.0" -solana-connection-cache = { path = "connection-cache", version = "=3.0.0", default-features = false } -solana-core = { path = "core", version = "=3.0.0" } -solana-cost-model = { path = "cost-model", version = "=3.0.0" } -solana-cpi = "2.2.1" -solana-curve25519 = { path = "curves/curve25519", version = "=3.0.0" } -solana-define-syscall = "2.3.0" -solana-derivation-path = "2.2.1" -solana-download-utils = { path = "download-utils", version = "=3.0.0" } -solana-ed25519-program = "2.2.3" -solana-entry = { path = "entry", version = "=3.0.0" } -solana-epoch-info = "2.2.1" -solana-epoch-rewards = "2.2.1" -solana-epoch-rewards-hasher = "2.2.1" -solana-epoch-schedule = "2.2.1" -solana-example-mocks = "2.2.1" -solana-faucet = { path = "faucet", version = "=3.0.0" } -solana-feature-gate-client = "0.0.2" -solana-feature-gate-interface = "2.2.2" -solana-fee = { path = "fee", version = "=3.0.0" } -solana-fee-calculator = "2.2.1" -solana-fee-structure = "2.3.0" -solana-file-download = "2.2.2" -solana-frozen-abi = "2.3.0" -solana-frozen-abi-macro = "2.2.1" -solana-genesis = { path = "genesis", version = "=3.0.0" } -solana-genesis-config = "2.3.0" -solana-genesis-utils = { path = "genesis-utils", version = "=3.0.0" } -solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=3.0.0" } -solana-gossip = { path = "gossip", version = "=3.0.0" } 
-solana-hard-forks = "2.2.1" -solana-hash = "2.3.0" -solana-inflation = "2.2.1" -solana-instruction = "2.3.0" -solana-instructions-sysvar = "2.2.2" -solana-keccak-hasher = "2.2.1" -solana-keypair = "2.2.1" -solana-last-restart-slot = "2.2.1" -solana-lattice-hash = { path = "lattice-hash", version = "=3.0.0" } -solana-ledger = { path = "ledger", version = "=3.0.0" } -solana-loader-v2-interface = "2.2.1" -solana-loader-v3-interface = "5.0.0" -solana-loader-v4-interface = "2.2.1" -solana-loader-v4-program = { path = "programs/loader-v4", version = "=3.0.0" } -solana-local-cluster = { path = "local-cluster", version = "=3.0.0" } -solana-log-collector = { path = "log-collector", version = "=3.0.0" } -solana-logger = "2.3.1" -solana-measure = { path = "measure", version = "=3.0.0" } -solana-merkle-tree = { path = "merkle-tree", version = "=3.0.0" } -solana-message = "2.4.0" -solana-metrics = { path = "metrics", version = "=3.0.0" } -solana-msg = "2.2.1" -solana-native-token = "2.2.2" -solana-net-utils = { path = "net-utils", version = "=3.0.0" } +solana-account = "3.0.0" +solana-account-decoder = { path = "account-decoder", version = "=3.1.0" } +solana-account-decoder-client-types = { path = "account-decoder-client-types", version = "=3.1.0" } +solana-account-info = "3.0.0" +solana-accounts-db = { path = "accounts-db", version = "=3.1.0" } +solana-address = "1.0.0" +solana-address-lookup-table-interface = "3.0.0" +solana-atomic-u64 = "3.0.0" +solana-banks-client = { path = "banks-client", version = "=3.1.0" } +solana-banks-interface = { path = "banks-interface", version = "=3.1.0" } +solana-banks-server = { path = "banks-server", version = "=3.1.0" } +solana-bench-tps = { path = "bench-tps", version = "=3.1.0" } +solana-big-mod-exp = "3.0.0" +solana-bincode = "3.0.0" +solana-blake3-hasher = "3.0.0" +solana-bloom = { path = "bloom", version = "=3.1.0" } +solana-bls-signatures = { version = "0.3.0", features = ["serde"] } +solana-bn254 = "3.0.0" +solana-borsh = "3.0.0" 
+solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=3.1.0" } +solana-bucket-map = { path = "bucket_map", version = "=3.1.0" } +solana-builtins = { path = "builtins", version = "=3.1.0" } +solana-builtins-default-costs = { path = "builtins-default-costs", version = "=3.1.0" } +solana-clap-utils = { path = "clap-utils", version = "=3.1.0" } +solana-clap-v3-utils = { path = "clap-v3-utils", version = "=3.1.0" } +solana-cli = { path = "cli", version = "=3.1.0" } +solana-cli-config = { path = "cli-config", version = "=3.1.0" } +solana-cli-output = { path = "cli-output", version = "=3.1.0" } +solana-client = { path = "client", version = "=3.1.0" } +solana-client-traits = "3.0.0" +solana-clock = "3.0.0" +solana-cluster-type = "3.0.0" +solana-commitment-config = "3.0.0" +solana-compute-budget = { path = "compute-budget", version = "=3.1.0" } +solana-compute-budget-instruction = { path = "compute-budget-instruction", version = "=3.1.0" } +solana-compute-budget-interface = "3.0.0" +solana-compute-budget-program = { path = "programs/compute-budget", version = "=3.1.0" } +solana-config-interface = "2.0.0" +solana-connection-cache = { path = "connection-cache", version = "=3.1.0", default-features = false } +solana-core = { path = "core", version = "=3.1.0" } +solana-cost-model = { path = "cost-model", version = "=3.1.0" } +solana-cpi = "3.0.0" +solana-curve25519 = { path = "curves/curve25519", version = "=3.1.0" } +solana-define-syscall = "3.0.0" +solana-derivation-path = "3.0.0" +solana-download-utils = { path = "download-utils", version = "=3.1.0" } +solana-ed25519-program = "3.0.0" +solana-entry = { path = "entry", version = "=3.1.0" } +solana-epoch-info = "3.0.0" +solana-epoch-rewards = "3.0.0" +solana-epoch-rewards-hasher = "3.0.0" +solana-epoch-schedule = "3.0.0" +solana-example-mocks = "3.0.0" +solana-faucet = { path = "faucet", version = "=3.1.0" } +solana-feature-gate-interface = "3.0.0" +solana-fee = { path = "fee", version = "=3.1.0" } 
+solana-fee-calculator = "3.0.0" +solana-fee-structure = "3.0.0" +solana-file-download = "3.0.0" +solana-frozen-abi = "3.0.0" +solana-frozen-abi-macro = "3.0.0" +solana-genesis = { path = "genesis", version = "=3.1.0" } +solana-genesis-config = "3.0.0" +solana-genesis-utils = { path = "genesis-utils", version = "=3.1.0" } +solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=3.1.0" } +solana-gossip = { path = "gossip", version = "=3.1.0" } +solana-hard-forks = "3.0.0" +solana-hash = "3.0.0" +solana-inflation = "3.0.0" +solana-instruction = "3.0.0" +solana-instruction-error = "2.0.0" +solana-instructions-sysvar = "3.0.0" +solana-keccak-hasher = "3.0.0" +solana-keypair = "3.0.1" +solana-last-restart-slot = "3.0.0" +solana-lattice-hash = { path = "lattice-hash", version = "=3.1.0" } +solana-ledger = { path = "ledger", version = "=3.1.0" } +solana-loader-v2-interface = "3.0.0" +solana-loader-v3-interface = "6.1.0" +solana-loader-v4-interface = "3.1.0" +solana-loader-v4-program = { path = "programs/loader-v4", version = "=3.1.0" } +solana-local-cluster = { path = "local-cluster", version = "=3.1.0" } +solana-logger = "3.0.0" +solana-measure = { path = "measure", version = "=3.1.0" } +solana-merkle-tree = { path = "merkle-tree", version = "=3.1.0" } +solana-message = "3.0.1" +solana-metrics = { path = "metrics", version = "=3.1.0" } +solana-msg = "3.0.0" +solana-native-token = "3.0.0" +solana-net-utils = { path = "net-utils", version = "=3.1.0" } solana-nohash-hasher = "0.2.1" -solana-nonce = "2.2.1" -solana-nonce-account = "2.2.1" -solana-notifier = { path = "notifier", version = "=3.0.0" } -solana-offchain-message = "2.2.1" -solana-packet = "2.2.1" -solana-perf = { path = "perf", version = "=3.0.0" } -solana-poh = { path = "poh", version = "=3.0.0" } -solana-poh-config = "2.2.1" -solana-poseidon = { path = "poseidon", version = "=3.0.0" } -solana-precompile-error = "2.2.2" -solana-presigner = "2.2.1" -solana-program = { version = "2.3.0", 
default-features = false } -solana-program-entrypoint = "2.3.0" -solana-program-error = "2.2.2" -solana-program-memory = "2.3.1" -solana-program-option = "2.2.1" -solana-program-pack = "2.2.1" -solana-program-runtime = { path = "program-runtime", version = "=3.0.0" } -solana-program-test = { path = "program-test", version = "=3.0.0" } -solana-pubkey = { version = "2.4.0", default-features = false } -solana-pubsub-client = { path = "pubsub-client", version = "=3.0.0" } -solana-quic-client = { path = "quic-client", version = "=3.0.0" } -solana-quic-definitions = "2.3.0" -solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=3.0.0" } -solana-remote-wallet = { path = "remote-wallet", version = "=3.0.0", default-features = false } -solana-rent = "2.2.1" -solana-rent-collector = "2.2.1" -solana-reward-info = "2.2.1" -solana-rpc = { path = "rpc", version = "=3.0.0" } -solana-rpc-client = { path = "rpc-client", version = "=3.0.0", default-features = false } -solana-rpc-client-api = { path = "rpc-client-api", version = "=3.0.0" } -solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=3.0.0" } -solana-rpc-client-types = { path = "rpc-client-types", version = "=3.0.0" } -solana-runtime = { path = "runtime", version = "=3.0.0" } -solana-runtime-transaction = { path = "runtime-transaction", version = "=3.0.0" } -solana-sanitize = "2.2.1" -solana-sbpf = "=0.12.0" -solana-sdk-ids = "2.2.1" -solana-secp256k1-program = "2.2.3" -solana-secp256k1-recover = "2.2.1" -solana-secp256r1-program = "2.2.4" -solana-seed-derivable = "2.2.1" -solana-seed-phrase = "2.2.1" -solana-send-transaction-service = { path = "send-transaction-service", version = "=3.0.0" } -solana-serde = "2.2.1" -solana-serde-varint = "2.2.2" -solana-serialize-utils = "2.2.1" -solana-sha256-hasher = "2.3.0" -solana-short-vec = "2.2.1" -solana-shred-version = "2.2.1" -solana-signature = { version = "2.3.0", default-features = false } -solana-signer = "2.2.1" -solana-slot-hashes = 
"2.2.1" -solana-slot-history = "2.2.1" -solana-stable-layout = "2.2.1" -solana-stake-interface = { version = "1.2.1" } -solana-stake-program = { path = "programs/stake", version = "=3.0.0" } -solana-storage-bigtable = { path = "storage-bigtable", version = "=3.0.0" } -solana-storage-proto = { path = "storage-proto", version = "=3.0.0" } -solana-streamer = { path = "streamer", version = "=3.0.0" } -solana-svm = { path = "svm", version = "=3.0.0" } -solana-svm-callback = { path = "svm-callback", version = "=3.0.0" } -solana-svm-conformance = { path = "svm-conformance", version = "=3.0.0" } -solana-svm-feature-set = { path = "svm-feature-set", version = "=3.0.0" } -solana-svm-transaction = { path = "svm-transaction", version = "=3.0.0" } -solana-system-interface = "1.0" -solana-system-program = { path = "programs/system", version = "=3.0.0" } -solana-system-transaction = "2.2.1" -solana-sysvar = "2.2.2" -solana-sysvar-id = "2.2.1" -solana-test-validator = { path = "test-validator", version = "=3.0.0" } -solana-time-utils = "2.2.1" -solana-timings = { path = "timings", version = "=3.0.0" } -solana-tls-utils = { path = "tls-utils", version = "=3.0.0" } -solana-tps-client = { path = "tps-client", version = "=3.0.0" } -solana-tpu-client = { path = "tpu-client", version = "=3.0.0", default-features = false } -solana-tpu-client-next = { path = "tpu-client-next", version = "=3.0.0" } -solana-transaction = "2.2.3" -solana-transaction-context = { path = "transaction-context", version = "=3.0.0", features = ["bincode"] } -solana-transaction-error = "2.2.1" -solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", version = "=3.0.0" } -solana-transaction-status = { path = "transaction-status", version = "=3.0.0" } -solana-transaction-status-client-types = { path = "transaction-status-client-types", version = "=3.0.0" } -solana-turbine = { path = "turbine", version = "=3.0.0" } -solana-type-overrides = { path = "type-overrides", version = "=3.0.0" } 
-solana-udp-client = { path = "udp-client", version = "=3.0.0" } -solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=3.0.0" } -solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=3.0.0" } -solana-validator-exit = "2.2.1" -solana-version = { path = "version", version = "=3.0.0" } -solana-vote = { path = "vote", version = "=3.0.0" } -solana-vote-interface = "2.2.6" -solana-vote-program = { path = "programs/vote", version = "=3.0.0", default-features = false } -solana-wen-restart = { path = "wen-restart", version = "=3.0.0" } -solana-zk-elgamal-proof-program = { path = "programs/zk-elgamal-proof", version = "=3.0.0" } -solana-zk-keygen = { path = "zk-keygen", version = "=3.0.0" } -solana-zk-sdk = "3.0.0" -solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=3.0.0" } -solana-zk-token-sdk = { path = "zk-token-sdk", version = "=3.0.0" } -spl-associated-token-account-interface = "1.0.0" -spl-generic-token = "1.0.1" -spl-memo-interface = "1.0.0" -spl-pod = "0.5.1" -spl-token-2022-interface = "1.0.0" -spl-token-confidential-transfer-proof-extraction = "0.4.0" -spl-token-group-interface = "0.6.0" -spl-token-interface = "1.0.0" -spl-token-metadata-interface = "0.7.0" +solana-nonce = "3.0.0" +solana-nonce-account = "3.0.0" +solana-notifier = { path = "notifier", version = "=3.1.0" } +solana-offchain-message = "3.0.0" +solana-packet = "3.0.0" +solana-perf = { path = "perf", version = "=3.1.0" } +solana-poh = { path = "poh", version = "=3.1.0" } +solana-poh-config = "3.0.0" +solana-poseidon = { path = "poseidon", version = "=3.1.0" } +solana-precompile-error = "3.0.0" +solana-presigner = "3.0.0" +solana-program = { version = "3.0.0", default-features = false } +solana-program-binaries = { path = "program-binaries", version = "=3.1.0" } +solana-program-entrypoint = "3.1.0" +solana-program-error = "3.0.0" +solana-program-memory = "3.0.0" +solana-program-option = "3.0.0" +solana-program-pack = 
"3.0.0" +solana-program-runtime = { path = "program-runtime", version = "=3.1.0" } +solana-program-test = { path = "program-test", version = "=3.1.0" } +solana-pubkey = { version = "3.0.0", default-features = false } +solana-pubsub-client = { path = "pubsub-client", version = "=3.1.0" } +solana-quic-client = { path = "quic-client", version = "=3.1.0" } +solana-quic-definitions = "3.0.0" +solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=3.1.0" } +solana-remote-wallet = { path = "remote-wallet", version = "=3.1.0", default-features = false } +solana-rent = "3.0.0" +solana-reward-info = "3.0.0" +solana-rpc = { path = "rpc", version = "=3.1.0" } +solana-rpc-client = { path = "rpc-client", version = "=3.1.0", default-features = false } +solana-rpc-client-api = { path = "rpc-client-api", version = "=3.1.0" } +solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=3.1.0" } +solana-rpc-client-types = { path = "rpc-client-types", version = "=3.1.0" } +solana-runtime = { path = "runtime", version = "=3.1.0" } +solana-runtime-transaction = { path = "runtime-transaction", version = "=3.1.0" } +solana-sanitize = "3.0.1" +solana-sbpf = { version = "=0.12.2", default-features = false } +solana-sdk-ids = "3.0.0" +solana-secp256k1-program = "3.0.0" +solana-secp256k1-recover = "3.0.0" +solana-secp256r1-program = "3.0.0" +solana-seed-derivable = "3.0.0" +solana-seed-phrase = "3.0.0" +solana-send-transaction-service = { path = "send-transaction-service", version = "=3.1.0" } +solana-serde = "3.0.0" +solana-serde-varint = "3.0.0" +solana-serialize-utils = "3.1.0" +solana-sha256-hasher = "3.0.0" +solana-short-vec = "3.0.0" +solana-shred-version = "3.0.0" +solana-signature = { version = "3.1.0", default-features = false } +solana-signer = "3.0.0" +solana-signer-store = "0.1.0" +solana-slot-hashes = "3.0.0" +solana-slot-history = "3.0.0" +solana-stable-layout = "3.0.0" +solana-stake-interface = { version = "2.0.1" } +solana-stake-program = { 
path = "programs/stake", version = "=3.1.0" } +solana-storage-bigtable = { path = "storage-bigtable", version = "=3.1.0" } +solana-storage-proto = { path = "storage-proto", version = "=3.1.0" } +solana-streamer = { path = "streamer", version = "=3.1.0" } +solana-svm = { path = "svm", version = "=3.1.0" } +solana-svm-callback = { path = "svm-callback", version = "=3.1.0" } +solana-svm-feature-set = { path = "svm-feature-set", version = "=3.1.0" } +solana-svm-log-collector = { path = "svm-log-collector", version = "=3.1.0" } +solana-svm-measure = { path = "svm-measure", version = "=3.1.0" } +solana-svm-timings = { path = "svm-timings", version = "=3.1.0" } +solana-svm-transaction = { path = "svm-transaction", version = "=3.1.0" } +solana-svm-type-overrides = { path = "svm-type-overrides", version = "=3.1.0" } +solana-system-interface = "2.0" +solana-system-program = { path = "programs/system", version = "=3.1.0" } +solana-system-transaction = "3.0.0" +solana-sysvar = "3.0.0" +solana-sysvar-id = "3.0.0" +solana-test-validator = { path = "test-validator", version = "=3.1.0" } +solana-time-utils = "3.0.0" +solana-tls-utils = { path = "tls-utils", version = "=3.1.0" } +solana-tps-client = { path = "tps-client", version = "=3.1.0" } +solana-tpu-client = { path = "tpu-client", version = "=3.1.0", default-features = false } +solana-tpu-client-next = { path = "tpu-client-next", version = "=3.1.0" } +solana-transaction = "3.0.1" +solana-transaction-context = { path = "transaction-context", version = "=3.1.0", features = ["bincode"] } +solana-transaction-error = "3.0.0" +solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", version = "=3.1.0" } +solana-transaction-status = { path = "transaction-status", version = "=3.1.0" } +solana-transaction-status-client-types = { path = "transaction-status-client-types", version = "=3.1.0" } +solana-turbine = { path = "turbine", version = "=3.1.0" } +solana-udp-client = { path = "udp-client", version = "=3.1.0" } 
+solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=3.1.0" } +solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=3.1.0" } +solana-validator-exit = "3.0.0" +solana-version = { path = "version", version = "=3.1.0" } +solana-vote = { path = "vote", version = "=3.1.0" } +solana-vote-interface = "4.0.2" +solana-vote-program = { path = "programs/vote", version = "=3.1.0", default-features = false } +solana-votor-messages = { path = "votor-messages", version = "=3.1.0" } +solana-wen-restart = { path = "wen-restart", version = "=3.1.0" } +solana-zk-elgamal-proof-program = { path = "programs/zk-elgamal-proof", version = "=3.1.0" } +solana-zk-sdk = "4.0.0" +solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=3.1.0" } +solana-zk-token-sdk = { path = "zk-token-sdk", version = "=3.1.0" } +spl-associated-token-account-interface = "2.0.0" +spl-generic-token = "2.0.0" +spl-memo-interface = "2.0.0" +spl-pod = "0.7.0" +spl-token-2022-interface = "2.0.0" +spl-token-confidential-transfer-proof-extraction = "0.5.0" +spl-token-group-interface = "0.7.0" +spl-token-interface = "2.0.0" +spl-token-metadata-interface = "0.8.0" static_assertions = "1.1.0" stream-cancel = "0.8.2" strum = "0.24" @@ -581,9 +585,9 @@ sysctl = "0.4.6" systemstat = "0.2.5" tar = "0.4.44" tarpc = "0.29.0" -tempfile = "3.20.0" +tempfile = "3.22.0" test-case = "3.3.1" -thiserror = "2.0.12" +thiserror = "2.0.16" thread-priority = "1.2.0" tiny-bip39 = "0.8.2" tokio = "1.47.1" @@ -595,12 +599,13 @@ toml = "0.8.12" tonic = "0.9.2" tonic-build = "0.9.2" tower = "0.5.2" +tracing = "0.1" trait-set = "0.3.0" trees = "0.4.2" tungstenite = "0.20.1" unwrap_none = "0.1.2" uriparse = "0.6.4" -url = "2.5.4" +url = "2.5.7" vec_extract_if_polyfill = "0.1.0" wasm-bindgen = "0.2" winapi = "0.3.8" diff --git a/README.md b/README.md index e1cb6c29c049bb..b7a563b63d033c 100644 --- a/README.md +++ b/README.md @@ -19,17 +19,9 @@ $ source $HOME/.cargo/env $ 
rustup component add rustfmt ``` -When building the master branch, please make sure you are using the latest stable rust version by running: - -```bash -$ rustup update -``` - -When building a specific release branch, you should check the rust version in `ci/rust-version.sh` and if necessary, install that version by running: -```bash -$ rustup install VERSION -``` -Note that if this is not the latest rust version on your machine, cargo commands may require an [override](https://rust-lang.github.io/rustup/overrides.html) in order to use the correct version. +The `rust-toolchain.toml` file pins a specific rust version and ensures that +cargo commands run with that version. Note that cargo will automatically install +the correct version if it is not already installed. On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, protobuf etc. @@ -70,12 +62,12 @@ $ ./cargo test ### Starting a local testnet -Start your own testnet locally, instructions are in the [online docs](https://docs.solanalabs.com/clusters/benchmark). +Start your own testnet locally, instructions are in the [online docs](https://docs.anza.xyz/clusters/benchmark). ### Accessing the remote development cluster * `devnet` - stable public cluster for development accessible via -devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solanalabs.com/clusters) +devnet.solana.com. Runs 24/7. 
Learn more about the [public clusters](https://docs.anza.xyz/clusters) # Benchmarking diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index b6ba7fd8a67dd9..d94c96a1954b15 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -28,7 +28,7 @@ solana-address-lookup-table-interface = { workspace = true, features = [ "bytemuck", ] } solana-clock = { workspace = true } -solana-config-program-client = { workspace = true, features = ["serde"] } +solana-config-interface = { workspace = true, features = ["bincode"] } solana-epoch-schedule = { workspace = true } solana-fee-calculator = { workspace = true } solana-instruction = { workspace = true } @@ -41,7 +41,7 @@ solana-rent = { workspace = true } solana-sdk-ids = { workspace = true } solana-slot-hashes = { workspace = true } solana-slot-history = { workspace = true } -solana-stake-interface = { workspace = true } +solana-stake-interface = { workspace = true, features = ["bincode", "sysvar"] } solana-sysvar = { workspace = true } solana-vote-interface = { workspace = true, features = ["bincode"] } spl-generic-token = { workspace = true } diff --git a/account-decoder/src/parse_account_data.rs b/account-decoder/src/parse_account_data.rs index 807cedee64eba8..5b1d0bf5279a3e 100644 --- a/account-decoder/src/parse_account_data.rs +++ b/account-decoder/src/parse_account_data.rs @@ -165,7 +165,7 @@ mod test { }, solana_vote_interface::{ program::id as vote_program_id, - state::{VoteState, VoteStateVersions}, + state::{VoteStateV3, VoteStateVersions}, }, }; @@ -176,10 +176,10 @@ mod test { let data = vec![0; 4]; assert!(parse_account_data_v3(&account_pubkey, &other_program, &data, None).is_err()); - let vote_state = VoteState::default(); - let mut vote_account_data: Vec = vec![0; VoteState::size_of()]; - let versioned = VoteStateVersions::new_current(vote_state); - VoteState::serialize(&versioned, &mut vote_account_data).unwrap(); + let vote_state = VoteStateV3::default(); + let mut 
vote_account_data: Vec = vec![0; VoteStateV3::size_of()]; + let versioned = VoteStateVersions::new_v3(vote_state); + VoteStateV3::serialize(&versioned, &mut vote_account_data).unwrap(); let parsed = parse_account_data_v3( &account_pubkey, &vote_program_id(), @@ -188,7 +188,7 @@ mod test { ) .unwrap(); assert_eq!(parsed.program, "vote".to_string()); - assert_eq!(parsed.space, VoteState::size_of() as u64); + assert_eq!(parsed.space, VoteStateV3::size_of() as u64); let nonce_data = Versions::new(State::Initialized(Data::default())); let nonce_account_data = bincode::serialize(&nonce_data).unwrap(); diff --git a/account-decoder/src/parse_config.rs b/account-decoder/src/parse_config.rs index 4f03503806de36..08be68fc42bc26 100644 --- a/account-decoder/src/parse_config.rs +++ b/account-decoder/src/parse_config.rs @@ -5,7 +5,7 @@ use { }, bincode::deserialize, serde_json::Value, - solana_config_program_client::{get_config_data, ConfigKeys}, + solana_config_interface::state::{get_config_data, ConfigKeys}, solana_pubkey::Pubkey, solana_stake_interface::config::{ Config as StakeConfig, {self as stake_config}, @@ -101,7 +101,6 @@ mod test { bincode::serialize, serde_json::json, solana_account::{Account, AccountSharedData, ReadableAccount}, - solana_config_program_client::ConfigKeys, }; fn create_config_account( diff --git a/account-decoder/src/parse_sysvar.rs b/account-decoder/src/parse_sysvar.rs index c0b260f1542010..9a5c70c89d0a3b 100644 --- a/account-decoder/src/parse_sysvar.rs +++ b/account-decoder/src/parse_sysvar.rs @@ -14,11 +14,9 @@ use { solana_sdk_ids::sysvar, solana_slot_hashes::SlotHashes, solana_slot_history::{self as slot_history, SlotHistory}, + solana_stake_interface::stake_history::{StakeHistory, StakeHistoryEntry}, solana_sysvar::{ - epoch_rewards::EpochRewards, - last_restart_slot::LastRestartSlot, - rewards::Rewards, - stake_history::{StakeHistory, StakeHistoryEntry}, + epoch_rewards::EpochRewards, last_restart_slot::LastRestartSlot, rewards::Rewards, }, }; 
diff --git a/account-decoder/src/parse_vote.rs b/account-decoder/src/parse_vote.rs index aca75b74f8354d..3dfb5aa63bce55 100644 --- a/account-decoder/src/parse_vote.rs +++ b/account-decoder/src/parse_vote.rs @@ -2,11 +2,11 @@ use { crate::{parse_account_data::ParseAccountError, StringAmount}, solana_clock::{Epoch, Slot}, solana_pubkey::Pubkey, - solana_vote_interface::state::{BlockTimestamp, Lockout, VoteState}, + solana_vote_interface::state::{BlockTimestamp, Lockout, VoteStateV3}, }; pub fn parse_vote(data: &[u8]) -> Result { - let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?; + let mut vote_state = VoteStateV3::deserialize(data).map_err(ParseAccountError::from)?; let epoch_credits = vote_state .epoch_credits() .iter() @@ -125,10 +125,10 @@ mod test { #[test] fn test_parse_vote() { - let vote_state = VoteState::default(); - let mut vote_account_data: Vec = vec![0; VoteState::size_of()]; - let versioned = VoteStateVersions::new_current(vote_state); - VoteState::serialize(&versioned, &mut vote_account_data).unwrap(); + let vote_state = VoteStateV3::default(); + let mut vote_account_data: Vec = vec![0; VoteStateV3::size_of()]; + let versioned = VoteStateVersions::new_v3(vote_state); + VoteStateV3::serialize(&versioned, &mut vote_account_data).unwrap(); let expected_vote_state = UiVoteState { node_pubkey: Pubkey::default().to_string(), authorized_withdrawer: Pubkey::default().to_string(), diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml index f016eb135c84d8..5af427f4fdf5ac 100644 --- a/accounts-cluster-bench/Cargo.toml +++ b/accounts-cluster-bench/Cargo.toml @@ -11,6 +11,9 @@ edition = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[features] +dev-context-only-utils = [] + [dependencies] clap = { workspace = true } log = { workspace = true } @@ -50,8 +53,8 @@ jemallocator = { workspace = true } [dev-dependencies] solana-accounts-db = { workspace = true } 
solana-core = { workspace = true, features = ["dev-context-only-utils"] } -solana-faucet = { workspace = true } -solana-local-cluster = { workspace = true } +solana-faucet = { workspace = true, features = ["dev-context-only-utils"] } +solana-local-cluster = { workspace = true, features = ["dev-context-only-utils"] } solana-native-token = { workspace = true } solana-poh-config = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/accounts-cluster-bench/src/main.rs b/accounts-cluster-bench/src/main.rs index bdbe2cf4813cce..abc6a49fa76b87 100644 --- a/accounts-cluster-bench/src/main.rs +++ b/accounts-cluster-bench/src/main.rs @@ -61,10 +61,7 @@ pub fn poll_slot_height(client: &RpcClient) -> Slot { return slot; } else { num_retries -= 1; - warn!( - "get_slot_height failure: {:?}. remaining retries {}", - response, num_retries - ); + warn!("get_slot_height failure: {response:?}. remaining retries {num_retries}"); } if num_retries == 0 { panic!("failed to get_slot_height(), rpc node down?") @@ -81,10 +78,7 @@ pub fn poll_get_latest_blockhash(client: &RpcClient) -> Option { return Some(blockhash); } else { num_retries -= 1; - warn!( - "get_latest_blockhash failure: {:?}. remaining retries {}", - response, num_retries - ); + warn!("get_latest_blockhash failure: {response:?}. remaining retries {num_retries}"); } if num_retries == 0 { panic!("failed to get_latest_blockhash(), rpc node down?") @@ -102,10 +96,7 @@ pub fn poll_get_fee_for_message(client: &RpcClient, message: &mut Message) -> (O return (Some(fee), message.recent_blockhash); } else { num_retries -= 1; - warn!( - "get_fee_for_message failure: {:?}. remaining retries {}", - response, num_retries - ); + warn!("get_fee_for_message failure: {response:?}. 
remaining retries {num_retries}"); let blockhash = poll_get_latest_blockhash(client).expect("blockhash"); message.recent_blockhash = blockhash; @@ -119,7 +110,7 @@ pub fn poll_get_fee_for_message(client: &RpcClient, message: &mut Message) -> (O fn airdrop_lamports(client: &RpcClient, id: &Keypair, desired_balance: u64) -> bool { let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0); - info!("starting balance {}", starting_balance); + info!("starting balance {starting_balance}"); if starting_balance < desired_balance { let airdrop_amount = desired_balance - starting_balance; @@ -143,7 +134,7 @@ fn airdrop_lamports(client: &RpcClient, id: &Keypair, desired_balance: u64) -> b let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| { panic!("airdrop error {e}"); }); - info!("current balance {}...", current_balance); + info!("current balance {current_balance}..."); if current_balance - starting_balance != airdrop_amount { info!( @@ -396,10 +387,10 @@ fn process_get_multiple_accounts( stats.total_errors_time_us += rpc_time.as_us(); stats.errors += 1; if last_error.elapsed().as_secs() > 2 { - info!("error: {:?}", e); + info!("error: {e:?}"); *last_error = Instant::now(); } - debug!("error: {:?}", e); + debug!("error: {e:?}"); } } } @@ -519,7 +510,7 @@ fn run_rpc_bench_loop( stats.total_errors_time_us += rpc_time.as_us(); stats.errors += 1; if last_error.elapsed().as_secs() > 2 { - info!("get_account_info error: {:?}", e); + info!("get_account_info error: {e:?}"); last_error = Instant::now(); } } @@ -545,7 +536,7 @@ fn run_rpc_bench_loop( stats.total_errors_time_us += rpc_time.as_us(); stats.errors += 1; if last_error.elapsed().as_secs() > 2 { - info!("get_block error: {:?}", e); + info!("get_block error: {e:?}"); last_error = Instant::now(); } } @@ -569,7 +560,7 @@ fn run_rpc_bench_loop( stats.total_errors_time_us += rpc_time.as_us(); stats.errors += 1; if last_error.elapsed().as_secs() > 2 { - info!("get_blocks error: {:?}", e); + 
info!("get_blocks error: {e:?}"); last_error = Instant::now(); } } @@ -588,7 +579,7 @@ fn run_rpc_bench_loop( stats.total_errors_time_us += rpc_time.as_us(); stats.errors += 1; if last_error.elapsed().as_secs() > 2 { - info!("get_first_available_block error: {:?}", e); + info!("get_first_available_block error: {e:?}"); last_error = Instant::now(); } } @@ -607,7 +598,7 @@ fn run_rpc_bench_loop( stats.total_errors_time_us += rpc_time.as_us(); stats.errors += 1; if last_error.elapsed().as_secs() > 2 { - info!("get_slot error: {:?}", e); + info!("get_slot error: {e:?}"); last_error = Instant::now(); } } @@ -626,7 +617,7 @@ fn run_rpc_bench_loop( stats.total_errors_time_us += rpc_time.as_us(); stats.errors += 1; if last_error.elapsed().as_secs() > 2 { - info!("get_token_supply error: {:?}", e); + info!("get_token_supply error: {e:?}"); last_error = Instant::now(); } } @@ -659,7 +650,7 @@ fn run_rpc_bench_loop( stats.errors += 1; stats.total_errors_time_us += rpc_time.as_us(); if last_error.elapsed().as_secs() > 2 { - info!("get-program-accounts error: {:?}", e); + info!("get-program-accounts error: {e:?}"); last_error = Instant::now(); } } @@ -679,7 +670,7 @@ fn run_rpc_bench_loop( stats.errors += 1; stats.total_errors_time_us += rpc_time.as_us(); if last_error.elapsed().as_secs() > 2 { - info!("get-token-accounts-by-delegate error: {:?}", e); + info!("get-token-accounts-by-delegate error: {e:?}"); last_error = Instant::now(); } } @@ -699,7 +690,7 @@ fn run_rpc_bench_loop( stats.errors += 1; stats.total_errors_time_us += rpc_time.as_us(); if last_error.elapsed().as_secs() > 2 { - info!("get-token-accounts-by-owner error: {:?}", e); + info!("get-token-accounts-by-owner error: {e:?}"); last_error = Instant::now(); } } @@ -781,7 +772,7 @@ fn make_rpc_bench_threads( let transaction_signature_tracker = transaction_signature_tracker.clone(); let mint = *mint; Builder::new() - .name(format!("rpc-bench-{}", thread)) + .name(format!("rpc-bench-{thread}")) .spawn(move || { 
start_bench.wait(); run_rpc_bench_loop( @@ -853,7 +844,7 @@ fn run_accounts_bench( let transaction_signature_tracker = TransactionSignatureTracker(Arc::new(RwLock::new(VecDeque::with_capacity(5000)))); - info!("Starting balance(s): {:?}", balances); + info!("Starting balance(s): {balances:?}"); let executor = TransactionExecutor::new_with_rpc_client(client.clone()); @@ -917,10 +908,7 @@ fn run_accounts_bench( } last_balance = Instant::now(); if *balance < lamports * 2 { - info!( - "Balance {} is less than needed: {}, doing airdrop...", - balance, lamports - ); + info!("Balance {balance} is less than needed: {lamports}, doing airdrop..."); if !airdrop_lamports(&client, payer_keypairs[i], lamports * 100_000) { warn!("failed airdrop, exiting"); return; @@ -934,7 +922,7 @@ fn run_accounts_bench( if sigs_len < batch_size { let num_to_create = batch_size - sigs_len; if num_to_create >= payer_keypairs.len() { - info!("creating {} new", num_to_create); + info!("creating {num_to_create} new"); let chunk_size = num_to_create / payer_keypairs.len(); if chunk_size > 0 { for (i, keypair) in payer_keypairs.iter().enumerate() { @@ -1018,8 +1006,9 @@ fn run_accounts_bench( || max_accounts_met { info!( - "total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}", - total_accounts_created, total_accounts_closed, tx_sent_count, count, balances + "total_accounts_created: {total_accounts_created} total_accounts_closed: \ + {total_accounts_closed} tx_sent_count: {tx_sent_count} loop_count: {count} \ + balance(s): {balances:?}" ); last_log = Instant::now(); } @@ -1061,9 +1050,9 @@ fn run_accounts_bench( (max_created_seed - max_closed_seed) as usize, ); if num_to_close >= payer_keypairs.len() { - info!("closing {} accounts", num_to_close); + info!("closing {num_to_close} accounts"); let chunk_size = num_to_close / payer_keypairs.len(); - info!("{:?} chunk_size", chunk_size); + info!("{chunk_size:?} chunk_size"); if chunk_size > 0 { for (i, 
keypair) in payer_keypairs.iter().enumerate() { let txs: Vec<_> = (0..chunk_size) @@ -1101,8 +1090,8 @@ fn run_accounts_bench( count += 1; if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed { info!( - "total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}", - total_accounts_closed, tx_sent_count, count, balances + "total_accounts_closed: {total_accounts_closed} tx_sent_count: \ + {tx_sent_count} loop_count: {count} balance(s): {balances:?}" ); last_log = Instant::now(); } @@ -1150,8 +1139,8 @@ fn main() { .validator(is_url_or_moniker) .conflicts_with("entrypoint") .help( - "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]", + "URL for Solana's JSON RPC or moniker (or their first letter): [mainnet-beta, \ + testnet, devnet, localhost]", ), ) .arg( @@ -1206,10 +1195,9 @@ fn main() { .takes_value(true) .value_name("BYTES") .help( - "Every `n` batches, create a batch of close transactions for \ - the earliest remaining batch of accounts created. \ - Note: Should be > 1 to avoid situations where the close \ - transactions will be submitted before the corresponding \ + "Every `n` batches, create a batch of close transactions for the earliest \ + remaining batch of accounts created. Note: Should be > 1 to avoid situations \ + where the close transactions will be submitted before the corresponding \ create transactions have been confirmed", ), ) @@ -1232,7 +1220,10 @@ fn main() { .long("max-accounts") .takes_value(true) .value_name("NUM_ACCOUNTS") - .help("Halt after client has created this number of accounts. Does not count closed accounts."), + .help( + "Halt after client has created this number of accounts. 
Does not count closed \ + accounts.", + ), ) .arg( Arg::with_name("check_gossip") @@ -1273,10 +1264,7 @@ fn main() { .takes_value(true) .value_name("RPC_BENCH_TYPE(S)") .multiple(true) - .requires_ifs(&[ - ("supply", "mint"), - ("token-accounts-by-owner", "mint"), - ]) + .requires_ifs(&[("supply", "mint"), ("token-accounts-by-owner", "mint")]) .help("Spawn a thread which calls a specific RPC method in a loop to benchmark it"), ) .get_matches(); @@ -1333,7 +1321,7 @@ fn main() { Some( solana_net_utils::get_cluster_shred_version(&entrypoint_addr).unwrap_or_else( |err| { - eprintln!("Failed to get shred version: {}", err); + eprintln!("Failed to get shred version: {err}"); exit(1); }, ), @@ -1344,7 +1332,7 @@ fn main() { }; let rpc_addr = if !skip_gossip { - info!("Finding cluster entry: {:?}", entrypoint_addr); + info!("Finding cluster entry: {entrypoint_addr:?}"); let (gossip_nodes, _validators) = discover( None, // keypair Some(&entrypoint_addr), @@ -1364,7 +1352,7 @@ fn main() { info!("done found {} nodes", gossip_nodes.len()); gossip_nodes[0].rpc().unwrap() } else { - info!("Using {:?} as the RPC address", entrypoint_addr); + info!("Using {entrypoint_addr:?} as the RPC address"); entrypoint_addr }; @@ -1409,18 +1397,15 @@ fn main() { pub mod test { use { super::*, - solana_accounts_db::{ - accounts_db::ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, - accounts_index::{AccountIndex, AccountSecondaryIndexes}, - }, + solana_accounts_db::accounts_index::{AccountIndex, AccountSecondaryIndexes}, solana_core::validator::ValidatorConfig, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_for_tests, solana_local_cluster::{ local_cluster::{ClusterConfig, LocalCluster}, validator_configs::make_identical_validator_configs, }, solana_measure::measure::Measure, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_poh_config::PohConfig, solana_program_pack::Pack, solana_test_validator::TestValidator, @@ -1428,15 +1413,7 @@ 
pub mod test { }; fn initialize_and_add_secondary_indexes(validator_config: &mut ValidatorConfig) { - if validator_config.accounts_db_config.is_none() { - validator_config.accounts_db_config = Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS); - } - - let account_indexes = &mut validator_config - .accounts_db_config - .as_mut() - .unwrap() - .account_indexes; + let account_indexes = &mut validator_config.accounts_db_config.account_indexes; if account_indexes.is_none() { *account_indexes = Some(AccountSecondaryIndexes::default()); } @@ -1498,7 +1475,7 @@ pub mod test { ); let post_txs = client.get_transaction_count().unwrap(); start.stop(); - info!("{} pre {} post {}", start, pre_txs, post_txs); + info!("{start} pre {pre_txs} post {post_txs}"); } #[test] @@ -1548,7 +1525,7 @@ pub mod test { ); let post_txs = client.get_transaction_count().unwrap(); start.stop(); - info!("{} pre {} post {}", start, pre_txs, post_txs); + info!("{start} pre {pre_txs} post {post_txs}"); } #[test] @@ -1556,7 +1533,11 @@ pub mod test { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_for_tests( + mint_keypair, + None, /* per_time_cap */ + 0, /* port */ + ); let test_validator = TestValidator::with_custom_fees( mint_pubkey, 1, @@ -1572,11 +1553,7 @@ pub mod test { let funder = Keypair::new(); let latest_blockhash = rpc_client.get_latest_blockhash().unwrap(); let signature = rpc_client - .request_airdrop_with_blockhash( - &funder.pubkey(), - sol_to_lamports(1.0), - &latest_blockhash, - ) + .request_airdrop_with_blockhash(&funder.pubkey(), LAMPORTS_PER_SOL, &latest_blockhash) .unwrap(); rpc_client .confirm_transaction_with_spinner( @@ -1647,6 +1624,6 @@ pub mod test { 0, ); start.stop(); - info!("{}", start); + info!("{start}"); } } diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 46d56f733fcbac..f1eb57d3848327 100644 --- 
a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -25,7 +25,6 @@ dev-context-only-utils = [ "dep:solana-stake-program", "dep:solana-vote-program", "solana-account/dev-context-only-utils", - "solana-pubkey/rand", "solana-transaction/dev-context-only-utils", ] frozen-abi = [ @@ -85,10 +84,9 @@ solana-measure = { workspace = true } solana-message = { workspace = true } solana-metrics = { workspace = true } solana-nohash-hasher = { workspace = true } -solana-pubkey = { workspace = true } +solana-pubkey = { workspace = true, features = ["rand"] } solana-rayon-threadlimit = { workspace = true } solana-rent = { workspace = true, optional = true } -solana-rent-collector = { workspace = true } solana-reward-info = { workspace = true, features = ["serde"] } solana-sha256-hasher = { workspace = true } solana-signer = { workspace = true, optional = true } @@ -128,6 +126,7 @@ solana-logger = { workspace = true } solana-sdk-ids = { workspace = true } solana-signature = { workspace = true, features = ["rand"] } solana-slot-history = { workspace = true } +solana-svm = { workspace = true } static_assertions = { workspace = true } strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs index cfb0c89d15fdc6..f2012aafc22414 100644 --- a/accounts-db/benches/accounts.rs +++ b/accounts-db/benches/accounts.rs @@ -33,7 +33,7 @@ static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc; fn new_accounts_db(account_paths: Vec) -> AccountsDb { AccountsDb::new_with_config( account_paths, - Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), + ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, None, Arc::default(), ) @@ -51,10 +51,10 @@ fn bench_delete_dependencies(bencher: &mut Bencher) { let account = AccountSharedData::new(i + 1, 0, AccountSharedData::default().owner()); accounts .accounts_db - .store_for_tests(i, &[(&pubkey, &account)]); + .store_for_tests((i, [(&pubkey, &account)].as_slice())); 
accounts .accounts_db - .store_for_tests(i, &[(&old_pubkey, &zero_account)]); + .store_for_tests((i, [(&old_pubkey, &zero_account)].as_slice())); old_pubkey = pubkey; accounts.accounts_db.add_root_and_flush_write_cache(i); } @@ -89,7 +89,7 @@ where ) .collect(); let storable_accounts: Vec<_> = pubkeys.iter().zip(accounts_data.iter()).collect(); - accounts.store_accounts_cached((slot, storable_accounts.as_slice())); + accounts.store_accounts_par((slot, storable_accounts.as_slice()), None); accounts.add_root(slot); accounts .accounts_db @@ -116,7 +116,7 @@ where // Write to a different slot than the one being read from. Because // there's a new account pubkey being written to every time, will // compete for the accounts index lock on every store - accounts.store_accounts_cached((slot + 1, new_storable_accounts.as_slice())); + accounts.store_accounts_par((slot + 1, new_storable_accounts.as_slice()), None); }); } @@ -234,7 +234,7 @@ fn bench_dashmap_par_iter(bencher: &mut Bencher) { let (accounts, dashmap) = setup_bench_dashmap_iter(); bencher.iter(|| { - test::black_box(accounts.accounts_db.thread_pool.install(|| { + test::black_box(accounts.accounts_db.thread_pool_foreground.install(|| { dashmap .par_iter() .map(|cached_account| (*cached_account.key(), cached_account.value().1)) @@ -268,7 +268,7 @@ fn bench_load_largest_accounts(b: &mut Bencher) { let account = AccountSharedData::new(lamports, 0, &Pubkey::default()); accounts .accounts_db - .store_for_tests(0, &[(&pubkey, &account)]); + .store_for_tests((0, [(&pubkey, &account)].as_slice())); } accounts.accounts_db.add_root_and_flush_write_cache(0); let ancestors = Ancestors::from(vec![0]); diff --git a/accounts-db/benches/accounts_index.rs b/accounts-db/benches/accounts_index.rs index 6786a251d3cee1..9ed8c09dad7547 100644 --- a/accounts-db/benches/accounts_index.rs +++ b/accounts-db/benches/accounts_index.rs @@ -8,7 +8,7 @@ use { solana_accounts_db::{ account_info::AccountInfo, accounts_index::{ - 
AccountSecondaryIndexes, AccountsIndex, UpsertReclaim, + AccountSecondaryIndexes, AccountsIndex, ReclaimsSlotList, UpsertReclaim, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, }, }, @@ -29,7 +29,7 @@ fn bench_accounts_index(bencher: &mut Bencher) { const NUM_FORKS: u64 = 16; - let mut reclaims = vec![]; + let mut reclaims = ReclaimsSlotList::new(); let index = AccountsIndex::::new( &ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, Arc::default(), diff --git a/accounts-db/benches/append_vec.rs b/accounts-db/benches/append_vec.rs index 4e2536b6ad836e..204c51e01db948 100644 --- a/accounts-db/benches/append_vec.rs +++ b/accounts-db/benches/append_vec.rs @@ -5,7 +5,7 @@ use { rand::{thread_rng, Rng}, solana_account::{AccountSharedData, ReadableAccount}, solana_accounts_db::{ - accounts_file::StoredAccountsInfo, + accounts_file::{StorageAccess, StoredAccountsInfo}, append_vec::{ test_utils::{create_test_account, get_append_vec_path}, AppendVec, StoredMeta, @@ -39,10 +39,9 @@ fn append_account( vec.append_accounts(&storable_accounts, 0) } -#[bench] -fn append_vec_append(bencher: &mut Bencher) { +fn append_vec_append(bencher: &mut Bencher, storage_access: StorageAccess) { let path = get_append_vec_path("bench_append"); - let vec = AppendVec::new(&path.path, true, 64 * 1024); + let vec = AppendVec::new(&path.path, true, 64 * 1024, storage_access); bencher.iter(|| { let (meta, account) = create_test_account(0); if append_account(&vec, meta, &account).is_none() { @@ -51,6 +50,16 @@ fn append_vec_append(bencher: &mut Bencher) { }); } +#[bench] +fn append_vec_append_file(bencher: &mut Bencher) { + append_vec_append(bencher, StorageAccess::File); +} + +#[bench] +fn append_vec_append_mmap(bencher: &mut Bencher) { + append_vec_append(bencher, StorageAccess::Mmap); +} + fn add_test_accounts(vec: &AppendVec, size: usize) -> Vec<(usize, usize)> { (0..size) .filter_map(|sample| { @@ -60,10 +69,9 @@ fn add_test_accounts(vec: &AppendVec, size: usize) -> Vec<(usize, usize)> { .collect() } -#[bench] -fn 
append_vec_sequential_read(bencher: &mut Bencher) { +fn append_vec_sequential_read(bencher: &mut Bencher, storage_access: StorageAccess) { let path = get_append_vec_path("seq_read"); - let vec = AppendVec::new(&path.path, true, 64 * 1024); + let vec = AppendVec::new(&path.path, true, 64 * 1024, storage_access); let size = 1_000; let mut indexes = add_test_accounts(&vec, size); bencher.iter(|| { @@ -76,10 +84,20 @@ fn append_vec_sequential_read(bencher: &mut Bencher) { }); }); } + +#[bench] +fn append_vec_sequential_read_file(bencher: &mut Bencher) { + append_vec_sequential_read(bencher, StorageAccess::File); +} + #[bench] -fn append_vec_random_read(bencher: &mut Bencher) { +fn append_vec_sequential_read_mmap(bencher: &mut Bencher) { + append_vec_sequential_read(bencher, StorageAccess::Mmap); +} + +fn append_vec_random_read(bencher: &mut Bencher, storage_access: StorageAccess) { let path = get_append_vec_path("random_read"); - let vec = AppendVec::new(&path.path, true, 64 * 1024); + let vec = AppendVec::new(&path.path, true, 64 * 1024, storage_access); let size = 1_000; let indexes = add_test_accounts(&vec, size); bencher.iter(|| { @@ -93,9 +111,23 @@ fn append_vec_random_read(bencher: &mut Bencher) { } #[bench] -fn append_vec_concurrent_append_read(bencher: &mut Bencher) { +fn append_vec_random_read_file(bencher: &mut Bencher) { + append_vec_random_read(bencher, StorageAccess::File); +} + +#[bench] +fn append_vec_random_read_mmap(bencher: &mut Bencher) { + append_vec_random_read(bencher, StorageAccess::Mmap); +} + +fn append_vec_concurrent_append_read(bencher: &mut Bencher, storage_access: StorageAccess) { let path = get_append_vec_path("concurrent_read"); - let vec = Arc::new(AppendVec::new(&path.path, true, 1024 * 1024)); + let vec = Arc::new(AppendVec::new( + &path.path, + true, + 1024 * 1024, + storage_access, + )); let vec1 = vec.clone(); let indexes: Arc>> = Arc::new(Mutex::new(vec![])); let indexes1 = indexes.clone(); @@ -123,9 +155,23 @@ fn 
append_vec_concurrent_append_read(bencher: &mut Bencher) { } #[bench] -fn append_vec_concurrent_read_append(bencher: &mut Bencher) { +fn append_vec_concurrent_append_read_file(bencher: &mut Bencher) { + append_vec_concurrent_append_read(bencher, StorageAccess::File); +} + +#[bench] +fn append_vec_concurrent_append_read_mmap(bencher: &mut Bencher) { + append_vec_concurrent_append_read(bencher, StorageAccess::Mmap); +} + +fn append_vec_concurrent_read_append(bencher: &mut Bencher, storage_access: StorageAccess) { let path = get_append_vec_path("concurrent_read"); - let vec = Arc::new(AppendVec::new(&path.path, true, 1024 * 1024)); + let vec = Arc::new(AppendVec::new( + &path.path, + true, + 1024 * 1024, + storage_access, + )); let vec1 = vec.clone(); let indexes: Arc>> = Arc::new(Mutex::new(vec![])); let indexes1 = indexes.clone(); @@ -134,8 +180,12 @@ fn append_vec_concurrent_read_append(bencher: &mut Bencher) { if len == 0 { continue; } - let random_index: usize = thread_rng().gen_range(0..len + 1); - let (sample, pos) = *indexes1.lock().unwrap().get(random_index % len).unwrap(); + let random_index: usize = thread_rng().gen_range(0..len.wrapping_add(1)); + let (sample, pos) = *indexes1 + .lock() + .unwrap() + .get(random_index.checked_rem(len).unwrap()) + .unwrap(); vec1.get_stored_account_meta_callback(pos, |account| { let (_meta, test) = create_test_account(sample); assert_eq!(account.data(), test.data()); @@ -149,3 +199,13 @@ fn append_vec_concurrent_read_append(bencher: &mut Bencher) { } }); } + +#[bench] +fn append_vec_concurrent_read_append_file(bencher: &mut Bencher) { + append_vec_concurrent_read_append(bencher, StorageAccess::File); +} + +#[bench] +fn append_vec_concurrent_read_append_mmap(bencher: &mut Bencher) { + append_vec_concurrent_read_append(bencher, StorageAccess::Mmap); +} diff --git a/accounts-db/benches/bench_accounts_file.rs b/accounts-db/benches/bench_accounts_file.rs index 0e83e3e12c9db0..6a2068422ec86d 100644 --- 
a/accounts-db/benches/bench_accounts_file.rs +++ b/accounts-db/benches/bench_accounts_file.rs @@ -7,12 +7,11 @@ use { append_vec::{self, AppendVec}, tiered_storage::{ file::TieredReadableFile, - hot::{HotStorageReader, HotStorageWriter}, + hot::{HotStorageReader, HotStorageWriter, RENT_EXEMPT_RENT_EPOCH}, }, }, solana_clock::Slot, solana_pubkey::Pubkey, - solana_rent_collector::RENT_EXEMPT_RENT_EPOCH, solana_system_interface::MAX_PERMITTED_DATA_LENGTH, std::mem::ManuallyDrop, }; @@ -30,8 +29,8 @@ const ACCOUNTS_COUNTS: [usize; 4] = [ 10_000, // reasonable largest number of accounts written per slot ]; -fn bench_write_accounts_file(c: &mut Criterion) { - let mut group = c.benchmark_group("write_accounts_file"); +fn bench_write_accounts_file(c: &mut Criterion, storage_access: StorageAccess) { + let mut group = c.benchmark_group(format!("write_accounts_file_{storage_access:?}")); // most accounts on mnb are 165-200 bytes, so use that here too let space = 200; @@ -65,7 +64,7 @@ fn bench_write_accounts_file(c: &mut Criterion) { || { let path = temp_dir.path().join(format!("append_vec_{accounts_count}")); let file_size = accounts.len() * (space + append_vec::STORE_META_OVERHEAD); - AppendVec::new(path, true, file_size) + AppendVec::new(path, true, file_size, storage_access) }, |append_vec| { let res = append_vec.append_accounts(&storable_accounts, 0).unwrap(); @@ -99,6 +98,14 @@ fn bench_write_accounts_file(c: &mut Criterion) { } } +fn bench_write_accounts_file_file_io(c: &mut Criterion) { + bench_write_accounts_file(c, StorageAccess::File); +} + +fn bench_write_accounts_file_mmap(c: &mut Criterion) { + bench_write_accounts_file(c, StorageAccess::Mmap); +} + fn bench_scan_pubkeys(c: &mut Criterion) { let mut group = c.benchmark_group("scan_pubkeys"); let temp_dir = tempfile::tempdir().unwrap(); @@ -126,7 +133,7 @@ fn bench_scan_pubkeys(c: &mut Criterion) { .iter() .map(|(_, account)| append_vec::aligned_stored_size(account.data().len())) .sum(); - let append_vec = 
AppendVec::new(append_vec_path, true, file_size); + let append_vec = AppendVec::new(append_vec_path, true, file_size, StorageAccess::File); let stored_accounts_info = append_vec .append_accounts(&(Slot::MAX, storable_accounts.as_slice()), 0) .unwrap(); @@ -211,7 +218,7 @@ fn bench_get_account_shared_data(c: &mut Criterion) { .iter() .map(|(_, account)| append_vec::aligned_stored_size(account.data().len())) .sum(); - let append_vec = AppendVec::new(append_vec_path, true, file_size); + let append_vec = AppendVec::new(append_vec_path, true, file_size, StorageAccess::File); let stored_accounts_info = append_vec .append_accounts(&(Slot::MAX, storable_accounts.as_slice()), 0) .unwrap(); @@ -283,7 +290,8 @@ fn bench_get_account_shared_data(c: &mut Criterion) { criterion_group!( benches, - bench_write_accounts_file, + bench_write_accounts_file_file_io, + bench_write_accounts_file_mmap, bench_scan_pubkeys, bench_get_account_shared_data, ); diff --git a/accounts-db/benches/bench_hashing.rs b/accounts-db/benches/bench_hashing.rs index 54958715d66507..a339235d2879e7 100644 --- a/accounts-db/benches/bench_hashing.rs +++ b/accounts-db/benches/bench_hashing.rs @@ -26,7 +26,7 @@ const DATA_SIZES: [usize; 6] = [ /// /// Ensure this constant stays in sync with the value of `META_SIZE` in /// AccountsDb::hash_account_helper(). 
-const META_SIZE: usize = 81; +const META_SIZE: usize = 73; fn bench_hash_account(c: &mut Criterion) { let lamports = 123_456_789; diff --git a/accounts-db/benches/bench_lock_accounts.rs b/accounts-db/benches/bench_lock_accounts.rs index 620b6939e0cbc8..42d11e4875704f 100644 --- a/accounts-db/benches/bench_lock_accounts.rs +++ b/accounts-db/benches/bench_lock_accounts.rs @@ -38,7 +38,7 @@ fn create_test_transactions(lock_count: usize, read_conflicts: bool) -> Vec = utils::accounts_with_size_limit( 255, diff --git a/accounts-db/benches/utils.rs b/accounts-db/benches/utils.rs index a76c6c00a25496..9b8dfa8cbfc5da 100644 --- a/accounts-db/benches/utils.rs +++ b/accounts-db/benches/utils.rs @@ -9,9 +9,9 @@ use { }, rand_chacha::ChaChaRng, solana_account::AccountSharedData, + solana_accounts_db::tiered_storage::hot::RENT_EXEMPT_RENT_EPOCH, solana_pubkey::Pubkey, solana_rent::Rent, - solana_rent_collector::RENT_EXEMPT_RENT_EPOCH, std::iter, }; diff --git a/accounts-db/src/account_info.rs b/accounts-db/src/account_info.rs index 9cf8e771edd9e5..995cab01143316 100644 --- a/accounts-db/src/account_info.rs +++ b/accounts-db/src/account_info.rs @@ -4,8 +4,10 @@ //! Note that AccountInfo is saved to disk buckets during runtime, but disk buckets are recreated at startup. 
use { crate::{ - accounts_db::AccountsFileId, accounts_file::ALIGN_BOUNDARY_OFFSET, - accounts_index::IsCached, is_zero_lamport::IsZeroLamport, + accounts_db::AccountsFileId, + accounts_file::ALIGN_BOUNDARY_OFFSET, + accounts_index::{DiskIndexValue, IndexValue, IsCached}, + is_zero_lamport::IsZeroLamport, }, modular_bitfield::prelude::*, }; @@ -13,10 +15,6 @@ use { /// offset within an append vec to account data pub type Offset = usize; -/// bytes used to store this account in append vec -/// Note this max needs to be big enough to handle max data len of 10MB, which is a const -pub type StoredSize = u32; - /// specify where account data is located #[derive(Debug, PartialEq, Eq)] pub enum StorageLocation { @@ -103,6 +101,10 @@ impl IsCached for AccountInfo { } } +impl IndexValue for AccountInfo {} + +impl DiskIndexValue for AccountInfo {} + impl IsCached for StorageLocation { fn is_cached(&self) -> bool { matches!(self, StorageLocation::Cached) @@ -168,6 +170,7 @@ impl AccountInfo { } } } + #[cfg(test)] mod test { use {super::*, crate::append_vec::MAXIMUM_APPEND_VEC_FILE_SIZE}; diff --git a/accounts-db/src/account_storage.rs b/accounts-db/src/account_storage.rs index c48ca81b7ccba6..324583f5cde70a 100644 --- a/accounts-db/src/account_storage.rs +++ b/accounts-db/src/account_storage.rs @@ -8,8 +8,11 @@ use { solana_clock::Slot, solana_nohash_hasher::{BuildNoHashHasher, IntMap}, std::{ - ops::Range, - sync::{Arc, RwLock}, + ops::{Index, Range}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, RwLock, + }, }, }; @@ -83,6 +86,14 @@ impl AccountStorage { self.get_slot_storage_entry_shrinking_in_progress_ok(slot) } + pub(super) fn all_storages(&self) -> Vec> { + assert!(self.no_shrink_in_progress()); + self.map + .iter() + .map(|item| Arc::clone(item.value())) + .collect() + } + pub(crate) fn replace_storage_with_equivalent( &self, slot: Slot, @@ -284,20 +295,6 @@ impl ShrinkInProgress<'_> { } } -#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] 
-#[derive(Debug, Eq, PartialEq, Copy, Clone, Deserialize, Serialize)] -pub enum AccountStorageStatus { - Available = 0, - Full = 1, - Candidate = 2, -} - -impl Default for AccountStorageStatus { - fn default() -> Self { - Self::Available - } -} - /// Wrapper over slice of `Arc` that provides an ordered access to storages. /// /// A few strategies are available for ordering storages: @@ -339,6 +336,19 @@ impl<'a> AccountStoragesOrderer<'a> { } } + pub fn entries_len(&self) -> usize { + self.indices.len() + } + + /// Returns the original index, into the storages slice, at `position` + /// + /// # Panics + /// + /// Caller must ensure `position` is in range, else will panic. + pub fn original_index(&'a self, position: usize) -> usize { + self.indices[position] + } + pub fn iter(&'a self) -> impl ExactSizeIterator + 'a { self.indices.iter().map(|i| self.storages[*i].as_ref()) } @@ -346,6 +356,71 @@ impl<'a> AccountStoragesOrderer<'a> { pub fn par_iter(&'a self) -> impl IndexedParallelIterator + 'a { self.indices.par_iter().map(|i| self.storages[*i].as_ref()) } + + pub fn into_concurrent_consumer(self) -> AccountStoragesConcurrentConsumer<'a> { + AccountStoragesConcurrentConsumer::new(self) + } +} + +impl Index for AccountStoragesOrderer<'_> { + type Output = AccountStorageEntry; + + fn index(&self, position: usize) -> &Self::Output { + // SAFETY: Caller must ensure `position` is in range. + let original_index = self.original_index(position); + // SAFETY: `original_index` must be valid here, so it is a valid index into `storages`. + self.storages[original_index].as_ref() + } +} + +/// A thread-safe, lock-free iterator for consuming `AccountStorageEntry` values +/// from an `AccountStoragesOrderer` across multiple threads. +/// +/// Unlike standard iterators, `AccountStoragesConcurrentConsumer`: +/// - Is **shared** between threads via references (`&self`), not moved. +/// - Allows safe, parallel consumption where each item is yielded at most once. 
+/// - Does **not** implement `Iterator` because it must take `&self` instead of `&mut self`. +pub struct AccountStoragesConcurrentConsumer<'a> { + orderer: AccountStoragesOrderer<'a>, + current_position: AtomicUsize, +} + +impl<'a> AccountStoragesConcurrentConsumer<'a> { + pub fn new(orderer: AccountStoragesOrderer<'a>) -> Self { + Self { + orderer, + current_position: AtomicUsize::new(0), + } + } + + /// Takes the next `AccountStorageEntry` moving shared consume position + /// until the end of the entries source is reached. + pub fn next(&'a self) -> Option> { + let position = self.current_position.fetch_add(1, Ordering::Relaxed); + if position < self.orderer.entries_len() { + // SAFETY: We have ensured `position` is in range. + let original_index = self.orderer.original_index(position); + let storage = &self.orderer[position]; + Some(NextItem { + position, + original_index, + storage, + }) + } else { + None + } + } +} + +/// Value returned from calling `AccountStoragesConcurrentConsumer::next()` +#[derive(Debug)] +pub struct NextItem<'a> { + /// The position through the orderer for this call to `next()` + pub position: usize, + /// The index into the original storages slice at this position + pub original_index: usize, + /// The storage itself + pub storage: &'a AccountStorageEntry, } /// Select the `nth` (`0 <= nth < range.len()`) value from a `range`, choosing values alternately @@ -377,12 +452,14 @@ fn select_from_range_with_start_end_rates( pub(crate) mod tests { use { super::*, - crate::accounts_file::AccountsFileProvider, + crate::accounts_file::{AccountsFileProvider, StorageAccess}, std::{iter, path::Path}, + test_case::test_case, }; - #[test] - fn test_shrink_in_progress() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_shrink_in_progress(storage_access: StorageAccess) { // test that we check in order map then shrink_in_progress_map let storage = AccountStorage::default(); let slot = 0; @@ -401,6 +478,7 @@ pub(crate) 
mod tests { id, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); let entry2 = Arc::new(AccountStorageEntry::new( common_store_path, @@ -408,6 +486,7 @@ pub(crate) mod tests { id, store_file_size2, AccountsFileProvider::AppendVec, + storage_access, )); storage.map.insert(slot, entry); @@ -450,7 +529,11 @@ pub(crate) mod tests { } impl AccountStorage { - fn get_test_storage_with_id(&self, id: AccountsFileId) -> Arc { + fn get_test_storage_with_id( + &self, + id: AccountsFileId, + storage_access: StorageAccess, + ) -> Arc { let slot = 0; // add a map store let common_store_path = Path::new(""); @@ -461,80 +544,87 @@ pub(crate) mod tests { id, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )) } - fn get_test_storage(&self) -> Arc { - self.get_test_storage_with_id(0) + fn get_test_storage(&self, storage_access: StorageAccess) -> Arc { + self.get_test_storage_with_id(0, storage_access) } } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "self.no_shrink_in_progress()")] - fn test_get_slot_storage_entry_fail() { + fn test_get_slot_storage_entry_fail(storage_access: StorageAccess) { let storage = AccountStorage::default(); storage .shrink_in_progress_map .write() .unwrap() - .insert(0, storage.get_test_storage()); + .insert(0, storage.get_test_storage(storage_access)); storage.get_slot_storage_entry(0); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "self.no_shrink_in_progress()")] - fn test_all_slots_fail() { + fn test_all_slots_fail(storage_access: StorageAccess) { let storage = AccountStorage::default(); storage .shrink_in_progress_map .write() .unwrap() - .insert(0, storage.get_test_storage()); + .insert(0, storage.get_test_storage(storage_access)); storage.all_slots(); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = 
"self.no_shrink_in_progress()")] - fn test_initialize_fail() { + fn test_initialize_fail(storage_access: StorageAccess) { let mut storage = AccountStorage::default(); storage .shrink_in_progress_map .write() .unwrap() - .insert(0, storage.get_test_storage()); + .insert(0, storage.get_test_storage(storage_access)); storage.initialize(AccountStorageMap::default()); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic( expected = "shrink_can_be_active || self.shrink_in_progress_map.read().unwrap().is_empty()" )] - fn test_remove_fail() { + fn test_remove_fail(storage_access: StorageAccess) { let storage = AccountStorage::default(); storage .shrink_in_progress_map .write() .unwrap() - .insert(0, storage.get_test_storage()); + .insert(0, storage.get_test_storage(storage_access)); storage.remove(&0, false); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "self.no_shrink_in_progress()")] - fn test_iter_fail() { + fn test_iter_fail(storage_access: StorageAccess) { let storage = AccountStorage::default(); storage .shrink_in_progress_map .write() .unwrap() - .insert(0, storage.get_test_storage()); + .insert(0, storage.get_test_storage(storage_access)); storage.iter(); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "self.no_shrink_in_progress()")] - fn test_insert_fail() { + fn test_insert_fail(storage_access: StorageAccess) { let storage = AccountStorage::default(); - let sample = storage.get_test_storage(); + let sample = storage.get_test_storage(storage_access); storage .shrink_in_progress_map .write() @@ -543,12 +633,13 @@ pub(crate) mod tests { storage.insert(0, sample); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "duplicate call")] - fn test_shrinking_in_progress_fail3() { + fn test_shrinking_in_progress_fail3(storage_access: 
StorageAccess) { // already entry in shrink_in_progress_map let storage = AccountStorage::default(); - let sample = storage.get_test_storage(); + let sample = storage.get_test_storage(storage_access); storage.map.insert(0, sample.clone()); storage .shrink_in_progress_map @@ -558,28 +649,30 @@ pub(crate) mod tests { storage.shrinking_in_progress(0, sample); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "duplicate call")] - fn test_shrinking_in_progress_fail4() { + fn test_shrinking_in_progress_fail4(storage_access: StorageAccess) { // already called 'shrink_in_progress' on this slot and it is still active let storage = AccountStorage::default(); - let sample_to_shrink = storage.get_test_storage(); - let sample = storage.get_test_storage(); + let sample_to_shrink = storage.get_test_storage(storage_access); + let sample = storage.get_test_storage(storage_access); storage.map.insert(0, sample_to_shrink); let _shrinking_in_progress = storage.shrinking_in_progress(0, sample.clone()); storage.shrinking_in_progress(0, sample); } - #[test] - fn test_shrinking_in_progress_second_call() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_shrinking_in_progress_second_call(storage_access: StorageAccess) { // already called 'shrink_in_progress' on this slot, but it finished, so we succeed // verify data structures during and after shrink and then with subsequent shrink call let storage = AccountStorage::default(); let slot = 0; let id_to_shrink = 1; let id_shrunk = 0; - let sample_to_shrink = storage.get_test_storage_with_id(id_to_shrink); - let sample = storage.get_test_storage(); + let sample_to_shrink = storage.get_test_storage_with_id(id_to_shrink, storage_access); + let sample = storage.get_test_storage(storage_access); storage.map.insert(slot, sample_to_shrink); let shrinking_in_progress = storage.shrinking_in_progress(slot, sample.clone()); 
assert!(storage.map.contains_key(&slot)); @@ -602,30 +695,33 @@ pub(crate) mod tests { storage.shrinking_in_progress(slot, sample); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "no pre-existing storage for shrinking slot")] - fn test_shrinking_in_progress_fail1() { + fn test_shrinking_in_progress_fail1(storage_access: StorageAccess) { // nothing in slot currently let storage = AccountStorage::default(); - let sample = storage.get_test_storage(); + let sample = storage.get_test_storage(storage_access); storage.shrinking_in_progress(0, sample); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "no pre-existing storage for shrinking slot")] - fn test_shrinking_in_progress_fail2() { + fn test_shrinking_in_progress_fail2(storage_access: StorageAccess) { // nothing in slot currently, but there is an empty map entry let storage = AccountStorage::default(); - let sample = storage.get_test_storage(); + let sample = storage.get_test_storage(storage_access); storage.shrinking_in_progress(0, sample); } - #[test] - fn test_missing() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_missing(storage_access: StorageAccess) { // already called 'shrink_in_progress' on this slot, but it finished, so we succeed // verify data structures during and after shrink and then with subsequent shrink call let storage = AccountStorage::default(); - let sample = storage.get_test_storage(); + let sample = storage.get_test_storage(storage_access); let id = sample.id(); let missing_id = 9999; let slot = sample.slot(); @@ -659,8 +755,9 @@ pub(crate) mod tests { assert!(storage.get_account_storage_entry(slot, id).is_some()); } - #[test] - fn test_get_if() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_get_if(storage_access: StorageAccess) { let storage = AccountStorage::default(); assert!(storage.get_if(|_, 
_| true).is_empty()); @@ -674,6 +771,7 @@ pub(crate) mod tests { id, 5000, AccountsFileProvider::AppendVec, + storage_access, ); storage.map.insert(slot, entry.into()); } @@ -691,15 +789,16 @@ pub(crate) mod tests { assert_eq!(storage.get_if(|_, _| true).len(), ids.len()); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "self.no_shrink_in_progress()")] - fn test_get_if_fail() { + fn test_get_if_fail(storage_access: StorageAccess) { let storage = AccountStorage::default(); storage .shrink_in_progress_map .write() .unwrap() - .insert(0, storage.get_test_storage()); + .insert(0, storage.get_test_storage(storage_access)); storage.get_if(|_, _| true); } diff --git a/accounts-db/src/account_storage/stored_account_info.rs b/accounts-db/src/account_storage/stored_account_info.rs index faabf619391676..b31bf64f2c8130 100644 --- a/accounts-db/src/account_storage/stored_account_info.rs +++ b/accounts-db/src/account_storage/stored_account_info.rs @@ -46,6 +46,12 @@ impl<'storage> StoredAccountInfo<'storage> { } } +impl IsZeroLamport for StoredAccountInfo<'_> { + fn is_zero_lamport(&self) -> bool { + self.lamports == 0 + } +} + impl ReadableAccount for StoredAccountInfo<'_> { fn lamports(&self) -> u64 { self.lamports diff --git a/accounts-db/src/account_storage_reader.rs b/accounts-db/src/account_storage_reader.rs index d42c630c46588e..f5aa204f2caa7a 100644 --- a/accounts-db/src/account_storage_reader.rs +++ b/accounts-db/src/account_storage_reader.rs @@ -32,7 +32,7 @@ impl<'a> AccountStorageReader<'a> { let mut sorted_obsolete_accounts = storage.get_obsolete_accounts(snapshot_slot); - // Tiered storage is not compatible with obsolete acocunts at this time + // Tiered storage is not compatible with obsolete accounts at this time if matches!(storage.accounts, AccountsFile::TieredStorage(_)) { assert!( sorted_obsolete_accounts.is_empty(), @@ -147,21 +147,25 @@ mod tests { fn create_storage_for_storage_reader( slot: 
Slot, provider: AccountsFileProvider, + storage_access: StorageAccess, ) -> (AccountStorageEntry, Vec) { let id = 0; let (temp_dirs, paths) = get_temp_accounts_paths(1).unwrap(); let file_size = 1024 * 1024; ( - AccountStorageEntry::new(&paths[0], slot, id, file_size, provider), + AccountStorageEntry::new(&paths[0], slot, id, file_size, provider, storage_access), temp_dirs, ) } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "Obsolete accounts should be empty for TieredStorage")] - fn test_account_storage_reader_tiered_storage_one_obsolete_account_should_panic() { + fn test_account_storage_reader_tiered_storage_one_obsolete_account_should_panic( + storage_access: StorageAccess, + ) { let (storage, _temp_dirs) = - create_storage_for_storage_reader(0, AccountsFileProvider::HotStorage); + create_storage_for_storage_reader(0, AccountsFileProvider::HotStorage, storage_access); let account = AccountSharedData::new(1, 10, &Pubkey::new_unique()); let account2 = AccountSharedData::new(1, 10, &Pubkey::new_unique()); @@ -182,10 +186,14 @@ mod tests { _ = AccountStorageReader::new(&storage, None).unwrap(); } - #[test_case(AccountsFileProvider::AppendVec)] - #[test_case(AccountsFileProvider::HotStorage)] - fn test_account_storage_reader_no_obsolete_accounts(provider: AccountsFileProvider) { - let (storage, _temp_dirs) = create_storage_for_storage_reader(0, provider); + #[test_case(AccountsFileProvider::AppendVec, StorageAccess::Mmap)] + #[test_case(AccountsFileProvider::AppendVec, StorageAccess::File)] + #[test_case(AccountsFileProvider::HotStorage, StorageAccess::File)] + fn test_account_storage_reader_no_obsolete_accounts( + provider: AccountsFileProvider, + storage_access: StorageAccess, + ) { + let (storage, _temp_dirs) = create_storage_for_storage_reader(0, provider, storage_access); let account = AccountSharedData::new(1, 10, &Pubkey::default()); let account2 = AccountSharedData::new(1, 10, &Pubkey::default()); 
@@ -221,7 +229,7 @@ mod tests { ) { solana_logger::setup(); let (storage, _temp_dirs) = - create_storage_for_storage_reader(0, AccountsFileProvider::AppendVec); + create_storage_for_storage_reader(0, AccountsFileProvider::AppendVec, storage_access); let slot = 0; @@ -318,10 +326,11 @@ mod tests { } } - #[test] - fn test_account_storage_reader_filter_by_slot() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_account_storage_reader_filter_by_slot(storage_access: StorageAccess) { let (storage, _temp_dirs) = - create_storage_for_storage_reader(10, AccountsFileProvider::AppendVec); + create_storage_for_storage_reader(10, AccountsFileProvider::AppendVec, storage_access); let total_accounts = 30; let slot = 0; diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 0b9022e4028873..32f28d971f3a13 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -4,7 +4,7 @@ use { account_storage::stored_account_info::StoredAccountInfo, accounts_db::{ AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, ScanAccountStorageData, - ScanStorageResult, + ScanStorageResult, UpdateIndexThreadSelection, }, accounts_index::{IndexKey, ScanConfig, ScanError, ScanOrder, ScanResult}, ancestors::Ancestors, @@ -24,7 +24,7 @@ use { message_address_table_lookup::SVMMessageAddressTableLookup, svm_message::SVMMessage, }, solana_transaction::sanitized::SanitizedTransaction, - solana_transaction_context::TransactionAccount, + solana_transaction_context::transaction_accounts::TransactionAccount, solana_transaction_error::TransactionResult as Result, std::{ cmp::Reverse, @@ -546,18 +546,36 @@ impl Accounts { } } - /// Store the accounts into the DB - pub fn store_cached<'a>( + /// Store `accounts` into the DB + /// + /// This version updates the accounts index sequentially, + /// using the same thread that calls the fn itself. 
+ pub fn store_accounts_seq<'a>( &self, accounts: impl StorableAccounts<'a>, transactions: Option<&'a [&'a SanitizedTransaction]>, ) { - self.accounts_db - .store_cached_inline_update_index(accounts, transactions); + self.accounts_db.store_accounts_unfrozen( + accounts, + transactions, + UpdateIndexThreadSelection::Inline, + ); } - pub fn store_accounts_cached<'a>(&self, accounts: impl StorableAccounts<'a>) { - self.accounts_db.store_cached(accounts) + /// Store `accounts` into the DB + /// + /// This version updates the accounts index in parallel, + /// using the foreground AccountsDb thread pool. + pub fn store_accounts_par<'a>( + &self, + accounts: impl StorableAccounts<'a>, + transactions: Option<&'a [&'a SanitizedTransaction]>, + ) { + self.accounts_db.store_accounts_unfrozen( + accounts, + transactions, + UpdateIndexThreadSelection::PoolWithThreshold, + ); } /// Add a slot to root. Root slots cannot be purged @@ -1120,7 +1138,8 @@ mod tests { impl Accounts { pub fn store_for_tests(&self, slot: Slot, pubkey: &Pubkey, account: &AccountSharedData) { - self.accounts_db.store_for_tests(slot, &[(pubkey, account)]) + self.accounts_db + .store_for_tests((slot, [(pubkey, account)].as_slice())) } /// useful to adapt tests written prior to introduction of the write cache @@ -1225,13 +1244,14 @@ mod tests { let pubkey = Pubkey::new_unique(); let account_data = AccountSharedData::new(1, 0, &Pubkey::default()); let accounts_db = Arc::new(AccountsDb::new_single_for_tests()); - accounts_db.store_for_tests( + accounts_db.store_for_tests(( 0, - &[ + [ (&Pubkey::default(), &account_data), (&pubkey, &account_data), - ], - ); + ] + .as_slice(), + )); let r_tx = sanitized_tx_from_metas(vec![AccountMeta { pubkey, @@ -1339,7 +1359,7 @@ mod tests { /* This test assumes pubkey0 < pubkey1 < pubkey2. * But the keys created with new_unique() does not guarantee this * order because of the endianness. new_unique() calls add 1 at each - * key generaration as the little endian integer. 
A pubkey stores its + * key generation as the little endian integer. A pubkey stores its * value in a 32-byte array bytes, and its eq-partial trait considers * the lower-address bytes more significant, which is the big-endian * order. diff --git a/accounts-db/src/accounts_cache.rs b/accounts-db/src/accounts_cache.rs index d8f5cacebef1f0..0b42e2c3e73419 100644 --- a/accounts-db/src/accounts_cache.rs +++ b/accounts-db/src/accounts_cache.rs @@ -1,10 +1,9 @@ use { - ahash::RandomState as AHashRandomState, dashmap::DashMap, solana_account::{AccountSharedData, ReadableAccount}, solana_clock::Slot, solana_nohash_hasher::BuildNoHashHasher, - solana_pubkey::Pubkey, + solana_pubkey::{Pubkey, PubkeyHasherBuilder}, std::{ collections::BTreeSet, ops::Deref, @@ -17,7 +16,7 @@ use { #[derive(Debug)] pub struct SlotCache { - cache: DashMap, AHashRandomState>, + cache: DashMap, PubkeyHasherBuilder>, same_account_writes: AtomicU64, same_account_writes_size: AtomicU64, unique_account_writes_size: AtomicU64, @@ -128,7 +127,7 @@ impl SlotCache { } impl Deref for SlotCache { - type Target = DashMap, AHashRandomState>; + type Target = DashMap, PubkeyHasherBuilder>; fn deref(&self) -> &Self::Target { &self.cache } diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 902c093606b204..36fcd723f90a83 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -31,29 +31,27 @@ use { account_info::{AccountInfo, Offset, StorageLocation}, account_storage::{ stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData}, - AccountStorage, AccountStorageStatus, AccountStoragesOrderer, ShrinkInProgress, + AccountStorage, AccountStoragesOrderer, ShrinkInProgress, }, accounts_cache::{AccountsCache, CachedAccount, SlotCache}, accounts_db::stats::{ AccountsStats, CleanAccountsStats, FlushStats, ObsoleteAccountsStats, PurgeStats, ShrinkAncientStats, ShrinkStats, ShrinkStatsSub, StoreAccountsTiming, }, - accounts_file::{ - AccountsFile, 
AccountsFileError, AccountsFileProvider, MatchAccountOwnerError, - StorageAccess, - }, + accounts_file::{AccountsFile, AccountsFileError, AccountsFileProvider, StorageAccess}, accounts_hash::{AccountLtHash, AccountsLtHash, ZERO_LAMPORT_ACCOUNT_LT_HASH}, accounts_index::{ in_mem_accounts_index::StartupStats, AccountSecondaryIndexes, AccountsIndex, - AccountsIndexConfig, AccountsIndexRootsStats, AccountsIndexScanResult, DiskIndexValue, - IndexKey, IndexValue, IsCached, RefCount, ScanConfig, ScanFilter, ScanResult, SlotList, + AccountsIndexConfig, AccountsIndexRootsStats, AccountsIndexScanResult, IndexKey, + IsCached, ReclaimsSlotList, RefCount, ScanConfig, ScanFilter, ScanResult, SlotList, UpsertReclaim, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, ACCOUNTS_INDEX_CONFIG_FOR_TESTING, }, accounts_index_storage::Startup, - accounts_update_notifier_interface::AccountsUpdateNotifier, + accounts_update_notifier_interface::{AccountForGeyser, AccountsUpdateNotifier}, active_stats::{ActiveStatItem, ActiveStats}, ancestors::Ancestors, - append_vec::{aligned_stored_size, IndexInfo, IndexInfoInner, STORE_META_OVERHEAD}, + append_vec::{self, aligned_stored_size, STORE_META_OVERHEAD}, + buffered_reader::RequiredLenBufFileRead, contains::Contains, is_zero_lamport::IsZeroLamport, partitioned_rewards::{ @@ -62,7 +60,6 @@ use { read_only_accounts_cache::ReadOnlyAccountsCache, storable_accounts::{StorableAccounts, StorableAccountsBySlot}, u64_align, utils, - verify_accounts_hash_in_background::VerifyAccountsHashInBackground, }, dashmap::{DashMap, DashSet}, log::*, @@ -74,7 +71,7 @@ use { solana_clock::{BankId, Epoch, Slot}, solana_epoch_schedule::EpochSchedule, solana_lattice_hash::lt_hash::LtHash, - solana_measure::{meas_dur, measure::Measure, measure_us}, + solana_measure::{measure::Measure, measure_us}, solana_nohash_hasher::{BuildNoHashHasher, IntMap, IntSet}, solana_pubkey::Pubkey, solana_rayon_threadlimit::get_thread_count, @@ -91,7 +88,7 @@ use { atomic::{AtomicBool, AtomicU32, 
AtomicU64, AtomicUsize, Ordering}, Arc, Condvar, Mutex, RwLock, }, - thread::sleep, + thread::{self, sleep}, time::{Duration, Instant}, }, tempfile::TempDir, @@ -107,6 +104,13 @@ const UNREF_ACCOUNTS_BATCH_SIZE: usize = 10_000; const DEFAULT_FILE_SIZE: u64 = 4 * 1024 * 1024; const DEFAULT_NUM_DIRS: u32 = 4; +// This value reflects recommended memory lock limit documented in the validator's +// setup instructions at docs/src/operations/guides/validator-start.md allowing use of +// several io_uring instances with fixed buffers for large disk IO operations. +pub const DEFAULT_MEMLOCK_BUDGET_SIZE: usize = 2_000_000_000; +// Linux distributions often have some small memory lock limit (e.g. 8MB) that we can tap into. +const MEMLOCK_BUDGET_SIZE_FOR_TESTS: usize = 4_000_000; + // When getting accounts for shrinking from the index, this is the # of accounts to lookup per thread. // This allows us to split up accounts index accesses across multiple threads. const SHRINK_COLLECT_CHUNK_SIZE: usize = 50; @@ -153,7 +157,7 @@ pub(crate) trait ShrinkCollectRefs<'a>: Sync + Send { fn collect(&mut self, other: Self); fn add( &mut self, - ref_count: u64, + ref_count: RefCount, account: &'a AccountFromStorage, slot_list: &[(Slot, AccountInfo)], ); @@ -176,7 +180,7 @@ impl<'a> ShrinkCollectRefs<'a> for AliveAccounts<'a> { } fn add( &mut self, - _ref_count: u64, + _ref_count: RefCount, account: &'a AccountFromStorage, _slot_list: &[(Slot, AccountInfo)], ) { @@ -210,7 +214,7 @@ impl<'a> ShrinkCollectRefs<'a> for ShrinkCollectAliveSeparatedByRefs<'a> { } fn add( &mut self, - ref_count: u64, + ref_count: RefCount, account: &'a AccountFromStorage, slot_list: &[(Slot, AccountInfo)], ) { @@ -299,10 +303,10 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { partitioned_epoch_rewards_config: DEFAULT_PARTITIONED_EPOCH_REWARDS_CONFIG, storage_access: StorageAccess::File, scan_filter_for_shrinking: ScanFilter::OnlyAbnormalTest, - mark_obsolete_accounts: false, - 
num_clean_threads: None, + mark_obsolete_accounts: MarkObsoleteAccounts::Disabled, + num_background_threads: None, num_foreground_threads: None, - num_hash_threads: None, + memlock_budget_size: MEMLOCK_BUDGET_SIZE_FOR_TESTS, }; pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), @@ -321,10 +325,10 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig partitioned_epoch_rewards_config: DEFAULT_PARTITIONED_EPOCH_REWARDS_CONFIG, storage_access: StorageAccess::File, scan_filter_for_shrinking: ScanFilter::OnlyAbnormal, - mark_obsolete_accounts: false, - num_clean_threads: None, + mark_obsolete_accounts: MarkObsoleteAccounts::Disabled, + num_background_threads: None, num_foreground_threads: None, - num_hash_threads: None, + memlock_budget_size: MEMLOCK_BUDGET_SIZE_FOR_TESTS, }; struct LoadAccountsIndexForShrink<'a, T: ShrinkCollectRefs<'a>> { @@ -390,7 +394,6 @@ pub struct GetUniqueAccountsResult { pub struct AccountsAddRootTiming { pub index_us: u64, pub cache_us: u64, - pub store_us: u64, } /// Slots older the "number of slots in an epoch minus this number" @@ -444,13 +447,15 @@ pub struct AccountsDbConfig { pub partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig, pub storage_access: StorageAccess, pub scan_filter_for_shrinking: ScanFilter, - pub mark_obsolete_accounts: bool, - /// Number of threads for background cleaning operations (`thread_pool_clean') - pub num_clean_threads: Option, - /// Number of threads for foreground operations (`thread_pool`) + pub mark_obsolete_accounts: MarkObsoleteAccounts, + /// Number of threads for background operations (`thread_pool_background') + pub num_background_threads: Option, + /// Number of threads for foreground operations (`thread_pool_foreground`) pub num_foreground_threads: Option, - /// Number of threads for background accounts hashing (`thread_pool_hash`) - pub num_hash_threads: Option, + /// Amount 
of memory (in bytes) that is allowed to be locked during db operations. + /// On linux it's verified on start-up with the kernel limits, such that during runtime + /// parts of it can be utilized without panicking. + pub memlock_budget_size: usize, } #[cfg(not(test))] @@ -486,12 +491,12 @@ pub enum ScanStorageResult { Stored(B), } -#[derive(Debug, Default)] +#[derive(Debug)] pub struct IndexGenerationInfo { pub accounts_data_len: u64, - /// The lt hash of the old/duplicate accounts identified during index generation. - /// Will be used when verifying the accounts lt hash, after rebuilding a Bank. - pub duplicates_lt_hash: Option>, + /// The accounts lt hash calculated during index generation. + /// Will be used when verifying accounts, after rebuilding a Bank. + pub calculated_accounts_lt_hash: AccountsLtHash, } #[derive(Debug, Default)] @@ -507,6 +512,8 @@ struct SlotIndexGenerationInfo { num_existed_in_mem: u64, /// Number of accounts in this slot that already existed, and were on-disk num_existed_on_disk: u64, + /// The accounts lt hash *of only this slot* + slot_lt_hash: SlotLtHash, } /// The lt hash of old/duplicate accounts @@ -524,6 +531,16 @@ impl Default for DuplicatesLtHash { } } +/// The lt hash of accounts in a single slot +#[derive(Debug)] +struct SlotLtHash(pub LtHash); + +impl Default for SlotLtHash { + fn default() -> Self { + Self(LtHash::identity()) + } +} + #[derive(Default, Debug)] struct GenerateIndexTimings { pub total_time_us: u64, @@ -539,7 +556,6 @@ struct GenerateIndexTimings { pub num_duplicate_accounts: u64, pub populate_duplicate_keys_us: u64, pub total_slots: u64, - pub par_duplicates_lt_hash_us: AtomicU64, pub visit_zero_lamports_us: u64, pub num_zero_lamport_single_refs: u64, pub all_accounts_are_zero_lamports_slots: u64, @@ -605,11 +621,6 @@ impl GenerateIndexTimings { startup_stats.copy_data_us.swap(0, Ordering::Relaxed), i64 ), - ( - "par_duplicates_lt_hash_us", - self.par_duplicates_lt_hash_us.load(Ordering::Relaxed), - i64 - ), 
( "num_zero_lamport_single_refs", self.num_zero_lamport_single_refs, @@ -640,9 +651,6 @@ impl GenerateIndexTimings { } } -impl IndexValue for AccountInfo {} -impl DiskIndexValue for AccountInfo {} - impl IsZeroLamport for AccountSharedData { fn is_zero_lamport(&self) -> bool { self.lamports() == 0 @@ -655,56 +663,6 @@ impl IsZeroLamport for Account { } } -struct MultiThreadProgress<'a> { - last_update: Instant, - my_last_report_count: u64, - total_count: &'a AtomicU64, - report_delay_secs: u64, - first_caller: bool, - ultimate_count: u64, - start_time: Instant, -} - -impl<'a> MultiThreadProgress<'a> { - fn new(total_count: &'a AtomicU64, report_delay_secs: u64, ultimate_count: u64) -> Self { - Self { - last_update: Instant::now(), - my_last_report_count: 0, - total_count, - report_delay_secs, - first_caller: false, - ultimate_count, - start_time: Instant::now(), - } - } - fn report(&mut self, my_current_count: u64) { - let now = Instant::now(); - if now.duration_since(self.last_update).as_secs() >= self.report_delay_secs { - let my_total_newly_processed_slots_since_last_report = - my_current_count - self.my_last_report_count; - - self.my_last_report_count = my_current_count; - let previous_total_processed_slots_across_all_threads = self.total_count.fetch_add( - my_total_newly_processed_slots_since_last_report, - Ordering::Relaxed, - ); - self.first_caller = - self.first_caller || 0 == previous_total_processed_slots_across_all_threads; - if self.first_caller { - let total = previous_total_processed_slots_across_all_threads - + my_total_newly_processed_slots_since_last_report; - info!( - "generating index: {}/{} slots... 
({}/s)", - total, - self.ultimate_count, - total / self.start_time.elapsed().as_secs().max(1), - ); - } - self.last_update = now; - } - } -} - /// An offset into the AccountsDb::storage vector pub type AtomicAccountsFileId = AtomicU32; pub type AccountsFileId = u32; @@ -830,36 +788,6 @@ impl LoadedAccountAccessor<'_> { } } } - - fn account_matches_owners(&self, owners: &[Pubkey]) -> Result { - match self { - LoadedAccountAccessor::Cached(cached_account) => cached_account - .as_ref() - .and_then(|cached_account| { - if cached_account.account.is_zero_lamport() { - None - } else { - owners - .iter() - .position(|entry| cached_account.account.owner() == entry) - } - }) - .ok_or(MatchAccountOwnerError::NoMatch), - LoadedAccountAccessor::Stored(maybe_storage_entry) => { - // storage entry may not be present if slot was cleaned up in - // between reading the accounts index and calling this function to - // get account meta from the storage entry here - maybe_storage_entry - .as_ref() - .map(|(storage_entry, offset)| { - storage_entry - .accounts - .account_matches_owners(*offset, owners) - }) - .unwrap_or(Err(MatchAccountOwnerError::UnableToLoad)) - } - } - } } pub enum LoadedAccount<'a> { @@ -956,12 +884,8 @@ pub struct AccountStorageEntry { /// storage holding the accounts pub accounts: AccountsFile, - /// Keeps track of the number of accounts stored in a specific AppendVec. 
- /// This is periodically checked to reuse the stores that do not have - /// any accounts in it - /// status corresponding to the storage, lets us know that - /// the append_vec, once maxed out, then emptied, can be reclaimed - count_and_status: SeqLock<(usize, AccountStorageStatus)>, + /// The number of alive accounts in this storage + count: AtomicUsize, alive_bytes: AtomicUsize, @@ -995,16 +919,17 @@ impl AccountStorageEntry { id: AccountsFileId, file_size: u64, provider: AccountsFileProvider, + storage_access: StorageAccess, ) -> Self { let tail = AccountsFile::file_name(slot, id); let path = Path::new(path).join(tail); - let accounts = provider.new_writable(path, file_size); + let accounts = provider.new_writable(path, file_size, storage_access); Self { id, slot, accounts, - count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), + count: AtomicUsize::new(0), alive_bytes: AtomicUsize::new(0), zero_lamport_single_ref_offsets: RwLock::default(), obsolete_accounts: RwLock::default(), @@ -1019,14 +944,15 @@ impl AccountStorageEntry { return None; } - let count_and_status = self.count_and_status.lock_write(); self.accounts.reopen_as_readonly().map(|accounts| Self { id: self.id, slot: self.slot, - count_and_status: SeqLock::new(*count_and_status), + count: AtomicUsize::new(self.count()), alive_bytes: AtomicUsize::new(self.alive_bytes()), accounts, - zero_lamport_single_ref_offsets: RwLock::default(), + zero_lamport_single_ref_offsets: RwLock::new( + self.zero_lamport_single_ref_offsets.read().unwrap().clone(), + ), obsolete_accounts: RwLock::new(self.obsolete_accounts.read().unwrap().clone()), }) } @@ -1036,40 +962,16 @@ impl AccountStorageEntry { id, slot, accounts, - count_and_status: SeqLock::new((0, AccountStorageStatus::Available)), + count: AtomicUsize::new(0), alive_bytes: AtomicUsize::new(0), zero_lamport_single_ref_offsets: RwLock::default(), obsolete_accounts: RwLock::default(), } } - pub fn set_status(&self, mut status: 
AccountStorageStatus) { - let mut count_and_status = self.count_and_status.lock_write(); - - let count = count_and_status.0; - - if status == AccountStorageStatus::Full && count == 0 { - // this case arises when the append_vec is full (store_ptrs fails), - // but all accounts have already been removed from the storage - // - // the only time it's safe to call reset() on an append_vec is when - // every account has been removed - // **and** - // the append_vec has previously been completely full - // - self.accounts.reset(); - status = AccountStorageStatus::Available; - } - - *count_and_status = (count, status); - } - - pub fn status(&self) -> AccountStorageStatus { - self.count_and_status.read().1 - } - + /// Returns the number of alive accounts in this storage pub fn count(&self) -> usize { - self.count_and_status.read().0 + self.count.load(Ordering::Acquire) } pub fn alive_bytes(&self) -> usize { @@ -1128,6 +1030,20 @@ impl AccountStorageEntry { zero_lamport_single_ref_offsets.insert(offset) } + /// Insert offsets into the zero lamport single ref account offset set. + /// Return the number of new offsets that were inserted. + fn batch_insert_zero_lamport_single_ref_account_offsets(&self, offsets: &[Offset]) -> u64 { + let mut zero_lamport_single_ref_offsets = + self.zero_lamport_single_ref_offsets.write().unwrap(); + let mut count = 0; + for offset in offsets { + if zero_lamport_single_ref_offsets.insert(*offset) { + count += 1; + } + } + count + } + /// Return the number of zero_lamport_single_ref accounts in the storage. 
fn num_zero_lamport_single_ref_accounts(&self) -> usize { self.zero_lamport_single_ref_offsets.read().unwrap().len() @@ -1141,10 +1057,12 @@ impl AccountStorageEntry { self.alive_bytes().saturating_sub(zero_lamport_dead_bytes) } + /// Returns the number of bytes used in this storage pub fn written_bytes(&self) -> u64 { self.accounts.len() as u64 } + /// Returns the number of bytes, not accounts, this storage can hold pub fn capacity(&self) -> u64 { self.accounts.capacity() } @@ -1166,45 +1084,28 @@ impl AccountStorageEntry { } fn add_accounts(&self, num_accounts: usize, num_bytes: usize) { - let mut count_and_status = self.count_and_status.lock_write(); - *count_and_status = (count_and_status.0 + num_accounts, count_and_status.1); + self.count.fetch_add(num_accounts, Ordering::Release); self.alive_bytes.fetch_add(num_bytes, Ordering::Release); } - /// returns # of accounts remaining in the storage + /// Removes `num_bytes` and `num_accounts` from the storage, + /// and returns the remaining number of accounts. 
fn remove_accounts(&self, num_bytes: usize, num_accounts: usize) -> usize { - let mut count_and_status = self.count_and_status.lock_write(); - let (mut count, mut status) = *count_and_status; - - if count == num_accounts && status == AccountStorageStatus::Full { - // this case arises when we remove the last account from the - // storage, but we've learned from previous write attempts that - // the storage is full - // - // the only time it's safe to call reset() on an append_vec is when - // every account has been removed - // **and** - // the append_vec has previously been completely full - // - // otherwise, the storage may be in flight with a store() - // call - self.accounts.reset(); - status = AccountStorageStatus::Available; - } + let prev_alive_bytes = self.alive_bytes.fetch_sub(num_bytes, Ordering::Release); + let prev_count = self.count.fetch_sub(num_accounts, Ordering::Release); - // Some code path is removing accounts too many; this may result in an - // unintended reveal of old state for unrelated accounts. + // enforce invariant that we're not removing too many bytes or accounts assert!( - count >= num_accounts, - "double remove of account in slot: {}/store: {}!!", - self.slot(), - self.id(), + num_bytes <= prev_alive_bytes && num_accounts <= prev_count, + "Too many bytes or accounts removed from storage! 
slot: {}, id: {}, initial alive \ + bytes: {prev_alive_bytes}, initial num accounts: {prev_count}, num bytes removed: \ + {num_bytes}, num accounts removed: {num_accounts}", + self.slot, + self.id, ); - self.alive_bytes.fetch_sub(num_bytes, Ordering::Release); - count = count.saturating_sub(num_accounts); - *count_and_status = (count, status); - count + // SAFETY: subtraction is safe since we just asserted num_accounts <= prev_num_accounts + prev_count - num_accounts } /// Returns the path to the underlying accounts storage file @@ -1231,13 +1132,23 @@ pub fn get_temp_accounts_paths(count: u32) -> io::Result<(Vec, Vec, - ref_count: u64, + ref_count: RefCount, /// Indicates if this account might have a zero lamport index entry. /// If false, the account *shall* not have zero lamport index entries. /// If true, the account *might* have zero lamport index entries. might_contain_zero_lamport_entry: bool, } +/// Indicates when to mark accounts obsolete +/// * Disabled - do not mark accounts obsolete +/// * Enabled - mark accounts obsolete during write cache flush +#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)] +pub enum MarkObsoleteAccounts { + #[default] + Disabled, + Enabled, +} + /// This is the return type of AccountsDb::construct_candidate_clean_keys. /// It's a collection of pubkeys with associated information to /// facilitate the decision making about which accounts can be removed @@ -1305,12 +1216,10 @@ pub struct AccountsDb { /// Starting file size of appendvecs file_size: u64, - /// Foreground thread pool used for par_iter - pub thread_pool: ThreadPool, - /// Thread pool for AccountsBackgroundServices - pub thread_pool_clean: ThreadPool, - // number of threads to use for accounts hash verify at startup - pub num_hash_threads: Option, + /// Thread pool for foreground tasks, e.g. transaction processing + pub thread_pool_foreground: ThreadPool, + /// Thread pool for background tasks, e.g. 
AccountsBackgroundService and flush/clean/shrink + pub thread_pool_background: ThreadPool, pub stats: AccountsStats, @@ -1360,8 +1269,6 @@ pub struct AccountsDb { pub(crate) active_stats: ActiveStats, - pub verify_accounts_hash_in_bg: VerifyAccountsHashInBackground, - /// Used to disable logging dead slots during removal. /// allow disabling noisy log pub log_dead_slots: AtomicBool, @@ -1396,29 +1303,13 @@ pub struct AccountsDb { /// Flag to indicate if the experimental obsolete account tracking feature is enabled. /// This feature tracks obsolete accounts in the account storage entry allowing /// for earlier cleaning of obsolete accounts in the storages and index. - pub mark_obsolete_accounts: bool, + pub mark_obsolete_accounts: MarkObsoleteAccounts, } pub fn quarter_thread_count() -> usize { std::cmp::max(2, num_cpus::get() / 4) } -pub fn make_min_priority_thread_pool() -> ThreadPool { - // Use lower thread count to reduce priority. - let num_threads = quarter_thread_count(); - rayon::ThreadPoolBuilder::new() - .thread_name(|i| format!("solAccountsLo{i:02}")) - .num_threads(num_threads) - .build() - .unwrap() -} - -/// Returns the default number of threads to use for background accounts hashing -pub fn default_num_hash_threads() -> NonZeroUsize { - // 1/8 of the number of cpus and up to 6 threads gives good balance for the system. 
- let num_threads = (num_cpus::get() / 8).clamp(2, 6); - NonZeroUsize::new(num_threads).unwrap() -} pub fn default_num_foreground_threads() -> usize { get_thread_count() } @@ -1431,7 +1322,7 @@ impl solana_frozen_abi::abi_example::AbiExample for AccountsDb { let some_data_len = 5; let some_slot: Slot = 0; let account = AccountSharedData::new(1, some_data_len, &key); - accounts_db.store_for_tests(some_slot, &[(&key, &account)]); + accounts_db.store_for_tests((some_slot, [(&key, &account)].as_slice())); accounts_db.add_root_and_flush_write_cache(0); accounts_db } @@ -1440,52 +1331,25 @@ impl solana_frozen_abi::abi_example::AbiExample for AccountsDb { impl AccountsDb { // The default high and low watermark sizes for the accounts read cache. // If the cache size exceeds MAX_SIZE_HI, it'll evict entries until the size is <= MAX_SIZE_LO. + // + // These default values were chosen empirically to minimize evictions on mainnet-beta. + // As of 2025-08-15 on mainnet-beta, the read cache size's steady state is around 2.5 GB, + // and add a bit more to buffer future growth. #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] - const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 400 * 1024 * 1024; + const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 3_000_000_000; #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] - const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 410 * 1024 * 1024; + const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 3_100_000_000; // See AccountsDbConfig::read_cache_evict_sample_size. 
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const DEFAULT_READ_ONLY_CACHE_EVICT_SAMPLE_SIZE: usize = 8; - pub fn default_for_tests() -> Self { - Self::new_single_for_tests() - } - - pub fn new_single_for_tests() -> Self { - AccountsDb::new_for_tests(Vec::new()) - } - - pub fn new_single_for_tests_with_provider(file_provider: AccountsFileProvider) -> Self { - AccountsDb::new_for_tests_with_provider(Vec::new(), file_provider) - } - - pub fn new_for_tests(paths: Vec) -> Self { - Self::new_for_tests_with_provider(paths, AccountsFileProvider::default()) - } - - fn new_for_tests_with_provider( - paths: Vec, - accounts_file_provider: AccountsFileProvider, - ) -> Self { - let mut db = AccountsDb::new_with_config( - paths, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - Arc::default(), - ); - db.accounts_file_provider = accounts_file_provider; - db - } - pub fn new_with_config( paths: Vec, - accounts_db_config: Option, + accounts_db_config: AccountsDbConfig, accounts_update_notifier: Option, exit: Arc, ) -> Self { - let accounts_db_config = accounts_db_config.unwrap_or_default(); let accounts_index_config = accounts_db_config.index.unwrap_or_default(); let accounts_index = AccountsIndex::new(&accounts_index_config, exit); @@ -1528,20 +1392,20 @@ impl AccountsDb { .num_foreground_threads .map(Into::into) .unwrap_or_else(default_num_foreground_threads); - let thread_pool = rayon::ThreadPoolBuilder::new() + let thread_pool_foreground = rayon::ThreadPoolBuilder::new() .num_threads(num_foreground_threads) - .thread_name(|i| format!("solAccounts{i:02}")) + .thread_name(|i| format!("solAcctsDbFg{i:02}")) .stack_size(ACCOUNTS_STACK_SIZE) .build() .expect("new rayon threadpool"); - let num_clean_threads = accounts_db_config - .num_clean_threads + let num_background_threads = accounts_db_config + .num_background_threads .map(Into::into) .unwrap_or_else(quarter_thread_count); - let thread_pool_clean = rayon::ThreadPoolBuilder::new() - .thread_name(|i| 
format!("solAccountsLo{i:02}")) - .num_threads(num_clean_threads) + let thread_pool_background = rayon::ThreadPoolBuilder::new() + .thread_name(|i| format!("solAcctsDbBg{i:02}")) + .num_threads(num_background_threads) .build() .expect("new rayon threadpool"); @@ -1575,10 +1439,8 @@ impl AccountsDb { exhaustively_verify_refcounts: accounts_db_config.exhaustively_verify_refcounts, storage_access: accounts_db_config.storage_access, scan_filter_for_shrinking: accounts_db_config.scan_filter_for_shrinking, - thread_pool, - thread_pool_clean, - num_hash_threads: accounts_db_config.num_hash_threads, - verify_accounts_hash_in_bg: VerifyAccountsHashInBackground::default(), + thread_pool_foreground, + thread_pool_background, active_stats: ActiveStats::default(), storage: AccountStorage::default(), accounts_cache: AccountsCache::default(), @@ -1645,6 +1507,7 @@ impl AccountsDb { self.next_id(), size, self.accounts_file_provider, + self.storage_access, ) } @@ -1658,10 +1521,10 @@ impl AccountsDb { ancient_account_cleans: &AtomicU64, epoch_schedule: &EpochSchedule, pubkeys_removed_from_accounts_index: &Mutex, - ) -> SlotList { + ) -> ReclaimsSlotList { let one_epoch_old = self.get_oldest_non_ancient_slot(epoch_schedule); let mut clean_rooted = Measure::start("clean_old_root-ms"); - let mut reclaims = Vec::new(); + let mut reclaims = ReclaimsSlotList::new(); let removed_from_index = self.accounts_index.clean_rooted_entries( pubkey, &mut reclaims, @@ -1696,20 +1559,19 @@ impl AccountsDb { reclaims: &SlotList, pubkeys_removed_from_accounts_index: &HashSet, ) -> ReclaimResult { - let mut measure = Measure::start("clean_old_root_reclaims"); - - let reclaim_result = self.handle_reclaims( - (!reclaims.is_empty()).then(|| reclaims.iter()), + if reclaims.is_empty() { + return ReclaimResult::default(); + } + let (reclaim_result, reclaim_us) = measure_us!(self.handle_reclaims( + reclaims.iter(), None, pubkeys_removed_from_accounts_index, 
HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), MarkAccountsObsolete::No, - ); - measure.stop(); - debug!("{measure}"); + )); self.clean_accounts_stats .clean_old_root_reclaim_us - .fetch_add(measure.as_us(), Ordering::Relaxed); + .fetch_add(reclaim_us, Ordering::Relaxed); reclaim_result } @@ -1818,26 +1680,30 @@ impl AccountsDb { } #[must_use] - pub fn purge_keys_exact<'a, C>( - &'a self, - pubkey_to_slot_set: impl Iterator, - ) -> (Vec<(Slot, AccountInfo)>, PubkeysRemovedFromAccountsIndex) + pub fn purge_keys_exact( + &self, + pubkey_to_slot_set: impl IntoIterator, + ) -> ( + ReclaimsSlotList, + PubkeysRemovedFromAccountsIndex, + ) where - C: Contains<'a, Slot> + 'a, + C: for<'a> Contains<'a, Slot>, { - let mut reclaims = Vec::new(); + let mut reclaims = ReclaimsSlotList::new(); let mut dead_keys = Vec::new(); let mut purge_exact_count = 0; - let (_, purge_exact_us) = measure_us!(for (pubkey, slots_set) in pubkey_to_slot_set { - purge_exact_count += 1; - let is_empty = self - .accounts_index - .purge_exact(pubkey, slots_set, &mut reclaims); - if is_empty { - dead_keys.push(pubkey); - } - }); + let (_, purge_exact_us) = + measure_us!(for (pubkey, slots_set) in pubkey_to_slot_set.into_iter() { + purge_exact_count += 1; + let is_empty = self + .accounts_index + .purge_exact(&pubkey, slots_set, &mut reclaims); + if is_empty { + dead_keys.push(pubkey); + } + }); let (pubkeys_removed_from_accounts_index, handle_dead_keys_us) = measure_us!(self .accounts_index @@ -2050,7 +1916,7 @@ impl AccountsDb { // Free to consume all the cores during startup dirty_store_routine(); } else { - self.thread_pool_clean.install(|| { + self.thread_pool_background.install(|| { dirty_store_routine(); }); } @@ -2098,11 +1964,6 @@ impl AccountsDb { (candidates, min_dirty_slot) } - /// Call clean_accounts() with the common parameters that tests/benches use. 
- pub fn clean_accounts_for_tests(&self) { - self.clean_accounts(None, false, &EpochSchedule::default()) - } - /// called with cli argument to verify refcounts are correct on all accounts /// this is very slow /// this function will call Rayon par_iter, so you will want to have thread pool installed if @@ -2112,16 +1973,16 @@ impl AccountsDb { max_slot_inclusive.unwrap_or_else(|| self.accounts_index.max_root_inclusive()); info!("exhaustively verifying refcounts as of slot: {max_slot_inclusive}"); let pubkey_refcount = DashMap::>::default(); - let slots = self.storage.all_slots(); + let mut storages = self.storage.all_storages(); + storages.retain(|s| s.slot() <= max_slot_inclusive); // populate - slots.into_par_iter().for_each(|slot| { - if slot > max_slot_inclusive { - return; - } - if let Some(storage) = self.storage.get_slot_storage_entry(slot) { + storages.par_iter().for_each_init( + || Box::new(append_vec::new_scan_accounts_reader()), + |reader, storage| { + let slot = storage.slot(); storage .accounts - .scan_accounts(|_offset, account| { + .scan_accounts(reader.as_mut(), |_offset, account| { let pk = account.pubkey(); match pubkey_refcount.entry(*pk) { dashmap::mapref::entry::Entry::Occupied(mut occupied_entry) => { @@ -2134,9 +1995,9 @@ impl AccountsDb { } } }) - .expect("must scan accounts storage"); - } - }); + .expect("must scan accounts storage") + }, + ); let total = pubkey_refcount.len(); let failed = AtomicBool::default(); let threads = quarter_thread_count(); @@ -2218,8 +2079,8 @@ impl AccountsDb { if is_startup { self.exhaustively_verify_refcounts(max_clean_root_inclusive); } else { - // otherwise, use the cleaning thread pool - self.thread_pool_clean + // otherwise, use the background thread pool + self.thread_pool_background .install(|| self.exhaustively_verify_refcounts(max_clean_root_inclusive)); } } @@ -2253,7 +2114,7 @@ impl AccountsDb { let not_found_on_fork_accum = AtomicU64::new(0); let missing_accum = AtomicU64::new(0); let useful_accum = 
AtomicU64::new(0); - let reclaims: SlotList = Vec::with_capacity(num_candidates as usize); + let reclaims: SlotList = SlotList::with_capacity(num_candidates as usize); let reclaims = Mutex::new(reclaims); let pubkeys_removed_from_accounts_index: PubkeysRemovedFromAccountsIndex = HashSet::new(); let pubkeys_removed_from_accounts_index = Mutex::new(pubkeys_removed_from_accounts_index); @@ -2371,7 +2232,7 @@ impl AccountsDb { if is_startup { do_clean_scan(); } else { - self.thread_pool_clean.install(do_clean_scan); + self.thread_pool_background.install(do_clean_scan); } accounts_scan.stop(); drop(active_guard); @@ -2486,13 +2347,13 @@ impl AccountsDb { let mut reclaims_time = Measure::start("reclaims"); // Recalculate reclaims with new purge set let mut pubkey_to_slot_set = Vec::new(); - for candidates_bin in candidates.iter() { + for candidates_bin in candidates { let mut bin_set = candidates_bin - .iter() + .into_iter() .filter_map(|(pubkey, cleaning_info)| { - let slot_list = &cleaning_info.slot_list; + let slot_list = cleaning_info.slot_list; (!slot_list.is_empty()).then_some(( - *pubkey, + pubkey, slot_list .iter() .map(|(slot, _)| *slot) @@ -2504,16 +2365,18 @@ impl AccountsDb { } let (reclaims, pubkeys_removed_from_accounts_index2) = - self.purge_keys_exact(pubkey_to_slot_set.iter()); + self.purge_keys_exact(pubkey_to_slot_set); pubkeys_removed_from_accounts_index.extend(pubkeys_removed_from_accounts_index2); - self.handle_reclaims( - (!reclaims.is_empty()).then(|| reclaims.iter()), - None, - &pubkeys_removed_from_accounts_index, - HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), - MarkAccountsObsolete::No, - ); + if !reclaims.is_empty() { + self.handle_reclaims( + reclaims.iter(), + None, + &pubkeys_removed_from_accounts_index, + HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats), + MarkAccountsObsolete::No, + ); + } reclaims_time.stop(); drop(active_guard); @@ -2694,7 +2557,7 @@ impl AccountsDb { /// It must be 
unrefed and removed to avoid double counting or missed counting in shrink fn handle_reclaims<'a, I>( &'a self, - reclaims: Option, + reclaims: I, expected_single_dead_slot: Option, pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex, handle_reclaims: HandleReclaims<'a>, @@ -2704,32 +2567,27 @@ impl AccountsDb { I: Iterator, { let mut reclaim_result = ReclaimResult::default(); - if let Some(reclaims) = reclaims { - let (dead_slots, reclaimed_offsets) = self.remove_dead_accounts( - reclaims, - expected_single_dead_slot, - mark_accounts_obsolete, - ); - reclaim_result.1 = reclaimed_offsets; - - if let HandleReclaims::ProcessDeadSlots(purge_stats) = handle_reclaims { - if let Some(expected_single_dead_slot) = expected_single_dead_slot { - assert!(dead_slots.len() <= 1); - if dead_slots.len() == 1 { - assert!(dead_slots.contains(&expected_single_dead_slot)); - } - } - - self.process_dead_slots( - &dead_slots, - Some(&mut reclaim_result.0), - purge_stats, - pubkeys_removed_from_accounts_index, - ); - } else { - assert!(dead_slots.is_empty()); + let (dead_slots, reclaimed_offsets) = + self.remove_dead_accounts(reclaims, expected_single_dead_slot, mark_accounts_obsolete); + reclaim_result.1 = reclaimed_offsets; + let HandleReclaims::ProcessDeadSlots(purge_stats) = handle_reclaims; + if let Some(expected_single_dead_slot) = expected_single_dead_slot { + assert!(dead_slots.len() <= 1); + if dead_slots.len() == 1 { + assert!(dead_slots.contains(&expected_single_dead_slot)); } } + // if we are marking accounts obsolete, then any dead slots have already been cleaned + let clean_stored_dead_slots = + !matches!(mark_accounts_obsolete, MarkAccountsObsolete::Yes(_)); + + self.process_dead_slots( + &dead_slots, + Some(&mut reclaim_result.0), + purge_stats, + pubkeys_removed_from_accounts_index, + clean_stored_dead_slots, + ); reclaim_result } @@ -2820,22 +2678,34 @@ impl AccountsDb { // supported pipelines in AccountsDb /// pubkeys_removed_from_accounts_index - 
These keys have already been removed from the accounts index /// and should not be unref'd. If they exist in the accounts index, they are NEW. + /// clean_stored_dead_slots - clean_stored_dead_slots iterates through all the pubkeys in the dead + /// slots and unrefs them in the accounts index if they are not present in + /// pubkeys_removed_from_accounts_index. Skipping clean is equivalent to + /// pubkeys_removed_from_accounts_index containing all the pubkeys in the dead slots fn process_dead_slots( &self, dead_slots: &IntSet, purged_account_slots: Option<&mut AccountSlots>, purge_stats: &PurgeStats, pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex, + clean_stored_dead_slots: bool, ) { if dead_slots.is_empty() { return; } let mut clean_dead_slots = Measure::start("reclaims::clean_dead_slots"); - self.clean_stored_dead_slots( - dead_slots, - purged_account_slots, - pubkeys_removed_from_accounts_index, - ); + + if clean_stored_dead_slots { + self.clean_stored_dead_slots( + dead_slots, + purged_account_slots, + pubkeys_removed_from_accounts_index, + ); + } + + // Remove dead slots from the accounts index root tracker + self.remove_dead_slots_metadata(dead_slots.iter()); + clean_dead_slots.stop(); let mut purge_removed_slots = Measure::start("reclaims::purge_removed_slots"); @@ -3059,11 +2929,6 @@ impl AccountsDb { let mut index_read_elapsed = Measure::start("index_read_elapsed"); - let len = stored_accounts.len(); - let alive_accounts_collect = Mutex::new(T::with_capacity(len, slot)); - let pubkeys_to_unref_collect = Mutex::new(Vec::with_capacity(len)); - let zero_lamport_single_ref_pubkeys_collect = Mutex::new(Vec::with_capacity(len)); - // Get a set of all obsolete offsets // Slot is not needed, as all obsolete accounts can be considered // dead for shrink. 
Zero lamport accounts are not marked obsolete @@ -3074,18 +2939,31 @@ impl AccountsDb { .collect(); // Filter all the accounts that are marked obsolete - let initial_len = stored_accounts.len(); + let total_starting_accounts = stored_accounts.len(); stored_accounts.retain(|account| !obsolete_offsets.contains(&account.index_info.offset())); - let obsolete_accounts_filtered = initial_len - stored_accounts.len(); + + let len = stored_accounts.len(); + let shrink_collect = Mutex::new(ShrinkCollect { + slot, + capacity: *capacity, + pubkeys_to_unref: Vec::with_capacity(len), + zero_lamport_single_ref_pubkeys: Vec::new(), + alive_accounts: T::with_capacity(len, slot), + total_starting_accounts, + all_are_zero_lamports: true, + alive_total_bytes: 0, // will be updated after `alive_accounts` is populated + }); stats .accounts_loaded .fetch_add(len as u64, Ordering::Relaxed); + stats + .obsolete_accounts_filtered + .fetch_add((total_starting_accounts - len) as u64, Ordering::Relaxed); stats .num_duplicated_accounts .fetch_add(*num_duplicated_accounts as u64, Ordering::Relaxed); - let all_are_zero_lamports_collect = Mutex::new(true); - self.thread_pool_clean.install(|| { + self.thread_pool_background.install(|| { stored_accounts .par_chunks(SHRINK_COLLECT_CHUNK_SIZE) .for_each(|stored_accounts| { @@ -3097,45 +2975,34 @@ impl AccountsDb { } = self.load_accounts_index_for_shrink(stored_accounts, stats, slot); // collect - alive_accounts_collect - .lock() - .unwrap() - .collect(alive_accounts); - pubkeys_to_unref_collect - .lock() - .unwrap() + let mut shrink_collect = shrink_collect.lock().unwrap(); + shrink_collect.alive_accounts.collect(alive_accounts); + shrink_collect + .pubkeys_to_unref .append(&mut pubkeys_to_unref); - zero_lamport_single_ref_pubkeys_collect - .lock() - .unwrap() + shrink_collect + .zero_lamport_single_ref_pubkeys .append(&mut zero_lamport_single_ref_pubkeys); if !all_are_zero_lamports { - *all_are_zero_lamports_collect.lock().unwrap() = false; + 
shrink_collect.all_are_zero_lamports = false; } }); }); - let alive_accounts = alive_accounts_collect.into_inner().unwrap(); - let pubkeys_to_unref = pubkeys_to_unref_collect.into_inner().unwrap(); - let zero_lamport_single_ref_pubkeys = zero_lamport_single_ref_pubkeys_collect - .into_inner() - .unwrap(); - index_read_elapsed.stop(); - stats - .obsolete_accounts_filtered - .fetch_add(obsolete_accounts_filtered as u64, Ordering::Relaxed); + let mut shrink_collect = shrink_collect.into_inner().unwrap(); + let alive_total_bytes = shrink_collect.alive_accounts.alive_bytes(); + shrink_collect.alive_total_bytes = alive_total_bytes; stats .index_read_elapsed .fetch_add(index_read_elapsed.as_us(), Ordering::Relaxed); - let alive_total_bytes = alive_accounts.alive_bytes(); - - stats - .accounts_removed - .fetch_add(len - alive_accounts.len(), Ordering::Relaxed); + stats.accounts_removed.fetch_add( + total_starting_accounts - shrink_collect.alive_accounts.len(), + Ordering::Relaxed, + ); stats.bytes_removed.fetch_add( capacity.saturating_sub(alive_total_bytes as u64), Ordering::Relaxed, @@ -3144,16 +3011,7 @@ impl AccountsDb { .bytes_written .fetch_add(alive_total_bytes as u64, Ordering::Relaxed); - ShrinkCollect { - slot, - capacity: *capacity, - pubkeys_to_unref, - zero_lamport_single_ref_pubkeys, - alive_accounts, - alive_total_bytes, - total_starting_accounts: len, - all_are_zero_lamports: all_are_zero_lamports_collect.into_inner().unwrap(), - } + shrink_collect } /// These accounts were found during shrink of `slot` to be slot_list=[slot] and ref_count == 1 and lamports = 0. 
@@ -3194,7 +3052,7 @@ impl AccountsDb { ); zero_lamport_single_ref_pubkeys.iter().for_each(|k| { - _ = self.purge_keys_exact([&(**k, slot)].into_iter()); + _ = self.purge_keys_exact([(**k, slot)]); }); } @@ -3799,7 +3657,7 @@ impl AccountsDb { let num_selected = shrink_slots.len(); let (_, shrink_all_us) = measure_us!({ - self.thread_pool_clean.install(|| { + self.thread_pool_background.install(|| { shrink_slots .into_par_iter() .for_each(|(slot, slot_shrink_candidate)| { @@ -3922,29 +3780,6 @@ impl AccountsDb { Ok(()) } - #[cfg(feature = "dev-context-only-utils")] - pub fn unchecked_scan_accounts( - &self, - metric_name: &'static str, - ancestors: &Ancestors, - mut scan_func: F, - config: &ScanConfig, - ) where - F: FnMut(&Pubkey, LoadedAccount, Slot), - { - self.accounts_index.unchecked_scan_accounts( - metric_name, - ancestors, - |pubkey, (account_info, slot)| { - self.get_account_accessor(slot, pubkey, &account_info.storage_location()) - .get_loaded_account(|loaded_account| { - scan_func(pubkey, loaded_account, slot); - }); - }, - config, - ); - } - pub fn index_scan_accounts( &self, ancestors: &Ancestors, @@ -4010,7 +3845,8 @@ impl AccountsDb { }) } ScanAccountStorageData::DataRefForStorage => { - storage.scan_accounts(|_offset, account| { + let mut reader = append_vec::new_scan_accounts_reader(); + storage.scan_accounts(&mut reader, |_offset, account| { let account_without_data = StoredAccountInfoWithoutData::new_from(&account); storage_scan_func(retval, &account_without_data, Some(account.data)); }) @@ -4035,7 +3871,7 @@ impl AccountsDb { // If we see the slot in the cache, then all the account information // is in this cached slot if slot_cache.len() > SCAN_SLOT_PAR_ITER_THRESHOLD { - ScanStorageResult::Cached(self.thread_pool.install(|| { + ScanStorageResult::Cached(self.thread_pool_foreground.install(|| { slot_cache .par_iter() .filter_map(|cached_account| { @@ -4091,47 +3927,6 @@ impl AccountsDb { self.do_load(ancestors, pubkey, None, load_hint, 
LoadZeroLamports::None) } - /// Return Ok(index_of_matching_owner) if the account owner at `offset` is one of the pubkeys in `owners`. - /// Return Err(MatchAccountOwnerError::NoMatch) if the account has 0 lamports or the owner is not one of - /// the pubkeys in `owners`. - /// Return Err(MatchAccountOwnerError::UnableToLoad) if the account could not be accessed. - pub fn account_matches_owners( - &self, - ancestors: &Ancestors, - account: &Pubkey, - owners: &[Pubkey], - ) -> Result { - let (slot, storage_location, _maybe_account_accesor) = self - .read_index_for_accessor_or_load_slow(ancestors, account, None, false) - .ok_or(MatchAccountOwnerError::UnableToLoad)?; - - if !storage_location.is_cached() { - let result = self.read_only_accounts_cache.load(*account, slot); - if let Some(account) = result { - return if account.is_zero_lamport() { - Err(MatchAccountOwnerError::NoMatch) - } else { - owners - .iter() - .position(|entry| account.owner() == entry) - .ok_or(MatchAccountOwnerError::NoMatch) - }; - } - } - - let (account_accessor, _slot) = self - .retry_to_get_account_accessor( - slot, - storage_location, - ancestors, - account, - None, - LoadHint::Unspecified, - ) - .ok_or(MatchAccountOwnerError::UnableToLoad)?; - account_accessor.account_matches_owners(owners) - } - /// load the account with `pubkey` into the read only accounts cache. /// The goal is to make subsequent loads (which caller expects to occur) to find the account quickly. pub fn load_account_into_read_cache(&self, ancestors: &Ancestors, pubkey: &Pubkey) { @@ -4476,18 +4271,15 @@ impl AccountsDb { /// Load account with `pubkey` and maybe put into read cache. /// - /// If the account is not already cached, invoke `should_put_in_read_cache_fn`. - /// The caller can inspect the account and indicate if it should be put into the read cache or not. - /// /// Return the account and the slot when the account was last stored. /// Return None for ZeroLamport accounts. 
pub fn load_account_with( &self, ancestors: &Ancestors, pubkey: &Pubkey, - should_put_in_read_cache_fn: impl Fn(&AccountSharedData) -> bool, + should_put_in_read_cache: bool, ) -> Option<(AccountSharedData, Slot)> { - let (slot, storage_location, _maybe_account_accesor) = + let (slot, storage_location, _maybe_account_accessor) = self.read_index_for_accessor_or_load_slow(ancestors, pubkey, None, false)?; // Notice the subtle `?` at previous line, we bail out pretty early if missing. @@ -4519,7 +4311,7 @@ impl AccountsDb { return None; } - if !in_write_cache && should_put_in_read_cache_fn(&account) { + if !in_write_cache && should_put_in_read_cache { /* We show this store into the read-only cache for account 'A' and future loads of 'A' from the read-only cache are safe/reflect 'A''s latest state on this fork. @@ -4554,7 +4346,7 @@ impl AccountsDb { let starting_max_root = self.accounts_index.max_root_inclusive(); - let (slot, storage_location, _maybe_account_accesor) = + let (slot, storage_location, _maybe_account_accessor) = self.read_index_for_accessor_or_load_slow(ancestors, pubkey, max_root, false)?; // Notice the subtle `?` at previous line, we bail out pretty early if missing. 
@@ -4621,8 +4413,8 @@ impl AccountsDb { if starting_max_root != ending_max_root { warn!( "do_load_with_populate_read_cache() scanning pubkey {pubkey} called with \ - fixed max root, but max root changed from {starting_max_root} to \ - {ending_max_root} during function call" + fixed max root, but max root changed from {starting_max_root} to \ + {ending_max_root} during function call" ); } } @@ -4650,16 +4442,6 @@ impl AccountsDb { } } - fn has_space_available(&self, slot: Slot, size: u64) -> bool { - let store = self.storage.get_slot_storage_entry(slot).unwrap(); - if store.status() == AccountStorageStatus::Available - && store.accounts.remaining_bytes() >= size - { - return true; - } - false - } - fn create_store( &self, slot: Slot, @@ -4860,17 +4642,14 @@ impl AccountsDb { } fn purge_slot_cache(&self, purged_slot: Slot, slot_cache: &SlotCache) { - let pubkey_to_slot_set: Vec<(Pubkey, Slot)> = slot_cache - .iter() - .map(|account| (*account.key(), purged_slot)) - .collect(); - self.purge_slot_cache_pubkeys(purged_slot, pubkey_to_slot_set, true); + let pubkeys = slot_cache.iter().map(|account| *account.key()); + self.purge_slot_cache_pubkeys(purged_slot, pubkeys, true); } fn purge_slot_cache_pubkeys( &self, purged_slot: Slot, - pubkey_to_slot_set: Vec<(Pubkey, Slot)>, + pubkeys: impl IntoIterator, is_dead: bool, ) { // Slot purged from cache should not exist in the backing store @@ -4878,8 +4657,11 @@ impl AccountsDb { .storage .get_slot_storage_entry_shrinking_in_progress_ok(purged_slot) .is_none()); - let num_purged_keys = pubkey_to_slot_set.len(); - let (reclaims, _) = self.purge_keys_exact(pubkey_to_slot_set.iter()); + let mut num_purged_keys = 0; + let (reclaims, _) = self.purge_keys_exact(pubkeys.into_iter().map(|key| { + num_purged_keys += 1; + (key, purged_slot) + })); assert_eq!(reclaims.len(), num_purged_keys); if is_dead { self.remove_dead_slots_metadata(std::iter::once(&purged_slot)); @@ -4913,8 +4695,7 @@ impl AccountsDb { let mut 
purge_accounts_index_elapsed = Measure::start("purge_accounts_index_elapsed"); // Purge this slot from the accounts index - let (reclaims, pubkeys_removed_from_accounts_index) = - self.purge_keys_exact(stored_keys.iter()); + let (reclaims, pubkeys_removed_from_accounts_index) = self.purge_keys_exact(stored_keys); purge_accounts_index_elapsed.stop(); purge_stats .purge_accounts_index_elapsed @@ -4926,13 +4707,15 @@ impl AccountsDb { // Slot should be dead after removing all its account entries // There is no reason to mark accounts obsolete as the slot storage is being purged let expected_dead_slot = Some(remove_slot); - self.handle_reclaims( - (!reclaims.is_empty()).then(|| reclaims.iter()), - expected_dead_slot, - &pubkeys_removed_from_accounts_index, - HandleReclaims::ProcessDeadSlots(purge_stats), - MarkAccountsObsolete::No, - ); + if !reclaims.is_empty() { + self.handle_reclaims( + reclaims.iter(), + expected_dead_slot, + &pubkeys_removed_from_accounts_index, + HandleReclaims::ProcessDeadSlots(purge_stats), + MarkAccountsObsolete::No, + ); + } handle_reclaims_elapsed.stop(); purge_stats .handle_reclaims_elapsed @@ -5101,78 +4884,13 @@ impl AccountsDb { hasher } - fn write_accounts_to_storage<'a>( - &self, - slot: Slot, - storage: &AccountStorageEntry, - accounts_and_meta_to_store: &impl StorableAccounts<'a>, - ) -> Vec { - let mut infos: Vec = Vec::with_capacity(accounts_and_meta_to_store.len()); - let mut total_append_accounts_us = 0; - while infos.len() < accounts_and_meta_to_store.len() { - let mut append_accounts = Measure::start("append_accounts"); - let stored_accounts_info = storage - .accounts - .write_accounts(accounts_and_meta_to_store, infos.len()); - append_accounts.stop(); - total_append_accounts_us += append_accounts.as_us(); - let Some(stored_accounts_info) = stored_accounts_info else { - storage.set_status(AccountStorageStatus::Full); - - // See if an account overflows the append vecs in the slot. 
- let data_len = accounts_and_meta_to_store.data_len(infos.len()); - let data_len = (data_len + STORE_META_OVERHEAD) as u64; - if !self.has_space_available(slot, data_len) { - info!( - "write_accounts_to_storage, no space: {}, {}, {}, {}, {}", - storage.accounts.capacity(), - storage.accounts.remaining_bytes(), - data_len, - infos.len(), - accounts_and_meta_to_store.len() - ); - let special_store_size = std::cmp::max(data_len * 2, self.file_size); - self.create_and_insert_store(slot, special_store_size, "large create"); - } - continue; - }; - - let store_id = storage.id(); - for (i, offset) in stored_accounts_info.offsets.iter().enumerate() { - infos.push(AccountInfo::new( - StorageLocation::AppendVec(store_id, *offset), - accounts_and_meta_to_store.is_zero_lamport(i), - )); - } - storage.add_accounts( - stored_accounts_info.offsets.len(), - stored_accounts_info.size, - ); - - // restore the state to available - storage.set_status(AccountStorageStatus::Available); - } - - self.stats - .store_append_accounts - .fetch_add(total_append_accounts_us, Ordering::Relaxed); - - infos - } - - pub fn mark_slot_frozen(&self, slot: Slot) { - if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) { - slot_cache.mark_slot_frozen(); - slot_cache.report_slot_store_metrics(); - } - self.accounts_cache.report_size(); - } - - // These functions/fields are only usable from a dev context (i.e. 
tests and benches) - #[cfg(feature = "dev-context-only-utils")] - pub fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) { - self.flush_slot_cache(slot); - } + pub fn mark_slot_frozen(&self, slot: Slot) { + if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) { + slot_cache.mark_slot_frozen(); + slot_cache.report_slot_store_metrics(); + } + self.accounts_cache.report_size(); + } /// true if write cache is too big fn should_aggressively_flush_cache(&self) -> bool { @@ -5380,7 +5098,7 @@ impl AccountsDb { ) -> FlushStats { let mut flush_stats = FlushStats::default(); let iter_items: Vec<_> = slot_cache.iter().collect(); - let mut pubkey_to_slot_set: Vec<(Pubkey, Slot)> = vec![]; + let mut pubkeys: Vec = vec![]; if should_flush_f.is_some() { if let Some(max_clean_root) = max_clean_root { if slot > max_clean_root { @@ -5409,7 +5127,7 @@ impl AccountsDb { } else { // If we don't flush, we have to remove the entry from the // index, since it's equivalent to purging - pubkey_to_slot_set.push((*key, slot)); + pubkeys.push(*key); flush_stats.num_bytes_purged += aligned_stored_size(account.data().len()) as u64; flush_stats.num_accounts_purged += 1; @@ -5421,7 +5139,7 @@ impl AccountsDb { let is_dead_slot = accounts.is_empty(); // Remove the account index entries from earlier roots that are outdated by later roots. // Safe because queries to the index will be reading updates from later roots. - self.purge_slot_cache_pubkeys(slot, pubkey_to_slot_set, is_dead_slot); + self.purge_slot_cache_pubkeys(slot, pubkeys, is_dead_slot); if !is_dead_slot { // This ensures that all updates are written to an AppendVec, before any @@ -5432,10 +5150,25 @@ impl AccountsDb { flush_stats.num_bytes_flushed.0, "flush_slot_cache", ); + + // Use ReclaimOldSlots to reclaim old slots if marking obsolete accounts and cleaning + // Cleaning is enabled if `should_flush_f` is Some. 
+ // should_flush_f is set to None when + // 1) There's an ongoing scan to avoid reclaiming accounts being scanned. + // 2) The slot is > max_clean_root to prevent unrooted slots from reclaiming rooted versions. + let reclaim_method = if self.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled + && should_flush_f.is_some() + { + UpsertReclaim::ReclaimOldSlots + } else { + UpsertReclaim::IgnoreReclaims + }; + let (store_accounts_timing_inner, store_accounts_total_inner_us) = measure_us!(self - .store_accounts_frozen( + ._store_accounts_frozen( (slot, &accounts[..]), &flushed_store, + reclaim_method, UpdateIndexThreadSelection::PoolWithThreshold, )); flush_stats.store_accounts_timing = store_accounts_timing_inner; @@ -5458,7 +5191,7 @@ impl AccountsDb { self.uncleaned_pubkeys .entry(slot) .or_default() - .extend(accounts.iter().map(|(pubkey, _account)| **pubkey)); + .extend(accounts.into_iter().map(|(pubkey, _account)| *pubkey)); flush_stats } @@ -5519,44 +5252,6 @@ impl AccountsDb { } } - fn write_accounts_to_cache<'a, 'b>( - &self, - slot: Slot, - accounts_and_meta_to_store: &impl StorableAccounts<'b>, - txs: Option<&[&SanitizedTransaction]>, - ) -> Vec { - let mut current_write_version = if self.accounts_update_notifier.is_some() { - self.write_version - .fetch_add(accounts_and_meta_to_store.len() as u64, Ordering::AcqRel) - } else { - 0 - }; - - (0..accounts_and_meta_to_store.len()) - .map(|index| { - let txn = txs.map(|txs| *txs.get(index).expect("txs must be present if provided")); - accounts_and_meta_to_store.account_default_if_zero_lamport(index, |account| { - let account_shared_data = account.to_account_shared_data(); - let pubkey = account.pubkey(); - let account_info = - AccountInfo::new(StorageLocation::Cached, account.is_zero_lamport()); - - self.notify_account_at_accounts_update( - slot, - &account_shared_data, - &txn, - pubkey, - current_write_version, - ); - current_write_version = current_write_version.saturating_add(1); - - 
self.accounts_cache.store(slot, pubkey, account_shared_data); - account_info - }) - }) - .collect() - } - fn report_store_stats(&self) { let mut total_count = 0; let mut newest_slot = 0; @@ -5589,28 +5284,6 @@ impl AccountsDb { ("total_alive_bytes", total_alive_bytes, i64), ("total_alive_ratio", total_alive_ratio, f64), ); - datapoint_info!( - "accounts_db-perf-stats", - ( - "delta_hash_num", - self.stats.delta_hash_num.swap(0, Ordering::Relaxed), - i64 - ), - ( - "delta_hash_scan_us", - self.stats - .delta_hash_scan_time_total_us - .swap(0, Ordering::Relaxed), - i64 - ), - ( - "delta_hash_accumulate_us", - self.stats - .delta_hash_accumulate_time_total_us - .swap(0, Ordering::Relaxed), - i64 - ), - ); } /// Calculates the accounts lt hash @@ -5676,58 +5349,6 @@ impl AccountsDb { AccountsLtHash(lt_hash) } - /// Calculates the accounts lt hash - /// - /// Intended to be used to verify the accounts lt hash at startup. - /// - /// The `duplicates_lt_hash` is the old/duplicate accounts to mix *out* of the storages. - /// This value comes from index generation. - /// The 'startup_slot' is the slot for which the accounts_lt_hash is calculated. - pub fn calculate_accounts_lt_hash_at_startup_from_storages( - &self, - storages: &[Arc], - duplicates_lt_hash: &DuplicatesLtHash, - startup_slot: Slot, - ) -> AccountsLtHash { - // Randomized order works well with rayon work splitting, since we only care about - // uniform distribution of total work size per batch (other ordering strategies might be - // useful for optimizing disk read sizes and buffers usage in a single IO queue). - let storages = AccountStoragesOrderer::with_random_order(storages); - let mut lt_hash = storages - .par_iter() - .fold(LtHash::identity, |mut accum, storage| { - // Function is calculating the accounts_lt_hash from all accounts in the - // storages as of startup_slot. 
This means that any accounts marked obsolete at a - slot newer than startup_slot should be included in the accounts_lt_hash - let obsolete_accounts = storage.get_obsolete_accounts(Some(startup_slot)); - storage - .accounts - .scan_accounts(|offset, account| { - // Obsolete accounts were not included in the original hash, so they should not be added here - if !obsolete_accounts.contains(&(offset, account.data.len())) { - let account_lt_hash = Self::lt_hash_account(&account, account.pubkey()); - accum.mix_in(&account_lt_hash.0); - } - }) - .expect("must scan accounts storage"); - accum - }) - .reduce(LtHash::identity, |mut accum, elem| { - accum.mix_in(&elem); - accum - }); - - if self.mark_obsolete_accounts { - // If `mark_obsolete_accounts` is true, then none if the duplicate accounts were - // included in the lt_hash, and do not need to be mixed out. - // The duplicates_lt_hash should be the default value. - assert_eq!(*duplicates_lt_hash, DuplicatesLtHash::default()); - } - lt_hash.mix_out(&duplicates_lt_hash.0); - - AccountsLtHash(lt_hash) - } - /// Calculates the capitalization /// /// Panics if capitalization overflows a u64. @@ -5834,6 +5455,9 @@ impl AccountsDb { } } + /// Updates the accounts index with the given `infos` and `accounts`. + /// Returns a vector of `SlotList` containing the reclaims for each batch processed. + /// Each element of the returned vector is guaranteed to be non-empty.
fn update_index<'a>( &self, infos: Vec, @@ -5841,12 +5465,20 @@ impl AccountsDb { reclaim: UpsertReclaim, update_index_thread_selection: UpdateIndexThreadSelection, thread_pool: &ThreadPool, - ) -> SlotList { + ) -> Vec> { let target_slot = accounts.target_slot(); let len = std::cmp::min(accounts.len(), infos.len()); + // If reclaiming old slots, ensure the target slot is a root + // Having an unrooted slot reclaim a rooted version of a slot + // could lead to index corruption if the unrooted version is + // discarded + if reclaim == UpsertReclaim::ReclaimOldSlots { + assert!(target_slot <= self.accounts_index.max_root_inclusive()); + } + let update = |start, end| { - let mut reclaims = Vec::with_capacity((end - start) / 2); + let mut reclaims = ReclaimsSlotList::with_capacity((end - start) / 2); (start..end).for_each(|i| { let info = infos[i]; @@ -5883,11 +5515,17 @@ impl AccountsDb { let end = std::cmp::min(start + chunk_size, len); update(start, end) }) - .flatten() - .collect::>() + .filter(|reclaims| !reclaims.is_empty()) + .collect() }) } else { - update(0, len) + let reclaims = update(0, len); + if reclaims.is_empty() { + // If no reclaims, return an empty vector + vec![] + } else { + vec![reclaims] + } } } @@ -6053,12 +5691,9 @@ impl AccountsDb { (dead_slots, reclaimed_offsets) } - fn remove_dead_slots_metadata<'a>( - &'a self, - dead_slots_iter: impl Iterator + Clone, - ) { + fn remove_dead_slots_metadata<'a>(&'a self, dead_slots_iter: impl Iterator) { let mut measure = Measure::start("remove_dead_slots_metadata-ms"); - self.clean_dead_slots_from_accounts_index(dead_slots_iter.clone()); + self.clean_dead_slots_from_accounts_index(dead_slots_iter); measure.stop(); inc_new_counter_info!("remove_dead_slots_metadata-ms", measure.as_ms() as usize); } @@ -6072,7 +5707,7 @@ impl AccountsDb { pubkeys_removed_from_accounts_index: &'a PubkeysRemovedFromAccountsIndex, ) { let batches = 1 + (num_pubkeys / UNREF_ACCOUNTS_BATCH_SIZE); - self.thread_pool_clean.install(|| 
{ + self.thread_pool_background.install(|| { (0..batches).into_par_iter().for_each(|batch| { let skip = batch * UNREF_ACCOUNTS_BATCH_SIZE; self.accounts_index.scan( @@ -6135,7 +5770,7 @@ impl AccountsDb { fn clean_dead_slots_from_accounts_index<'a>( &'a self, - dead_slots_iter: impl Iterator + Clone, + dead_slots_iter: impl Iterator, ) { let mut accounts_index_root_stats = AccountsIndexRootsStats::default(); let mut measure = Measure::start("clean_dead_slot"); @@ -6188,7 +5823,7 @@ impl AccountsDb { } // get all pubkeys in all dead slots let purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = { - self.thread_pool_clean.install(|| { + self.thread_pool_background.install(|| { stores .into_par_iter() .map(|store| { @@ -6230,30 +5865,15 @@ impl AccountsDb { .latest_accounts_index_roots_stats .update(&accounts_index_root_stats); - self.remove_dead_slots_metadata(dead_slots.iter()); measure.stop(); self.clean_accounts_stats .clean_stored_dead_slots_us .fetch_add(measure.as_us(), Ordering::Relaxed); } - pub fn store_cached<'a>(&self, accounts: impl StorableAccounts<'a>) { - self.store( - accounts, - None, - UpdateIndexThreadSelection::PoolWithThreshold, - ); - } - - pub(crate) fn store_cached_inline_update_index<'a>( - &self, - accounts: impl StorableAccounts<'a>, - transactions: Option<&'a [&'a SanitizedTransaction]>, - ) { - self.store(accounts, transactions, UpdateIndexThreadSelection::Inline); - } - - fn store<'a>( + /// Stores accounts in the write cache and updates the index. 
+ /// This should only be used for accounts that are unrooted (unfrozen) + pub(crate) fn store_accounts_unfrozen<'a>( &self, accounts: impl StorableAccounts<'a>, transactions: Option<&'a [&'a SanitizedTransaction]>, @@ -6274,196 +5894,68 @@ impl AccountsDb { .store_total_data .fetch_add(total_data as u64, Ordering::Relaxed); - self.store_accounts_unfrozen(accounts, transactions, update_index_thread_selection); + // Store the accounts in the write cache + let mut store_accounts_time = Measure::start("store_accounts"); + let infos = self.write_accounts_to_cache(accounts.target_slot(), &accounts, transactions); + store_accounts_time.stop(); + self.stats + .store_accounts + .fetch_add(store_accounts_time.as_us(), Ordering::Relaxed); + + // Update the index + let mut update_index_time = Measure::start("update_index"); + + self.update_index( + infos, + &accounts, + UpsertReclaim::PreviousSlotEntryWasCached, + update_index_thread_selection, + &self.thread_pool_foreground, + ); + + update_index_time.stop(); + self.stats + .store_update_index + .fetch_add(update_index_time.as_us(), Ordering::Relaxed); + self.stats + .store_num_accounts + .fetch_add(accounts.len() as u64, Ordering::Relaxed); self.report_store_timings(); } - fn report_store_timings(&self) { - if self.stats.last_store_report.should_update(1000) { - let read_cache_stats = self.read_only_accounts_cache.get_and_reset_stats(); - datapoint_info!( - "accounts_db_store_timings", - ( - "hash_accounts", - self.stats.store_hash_accounts.swap(0, Ordering::Relaxed), - i64 - ), - ( - "store_accounts", - self.stats.store_accounts.swap(0, Ordering::Relaxed), - i64 - ), - ( - "update_index", - self.stats.store_update_index.swap(0, Ordering::Relaxed), - i64 - ), - ( - "handle_reclaims", - self.stats.store_handle_reclaims.swap(0, Ordering::Relaxed), - i64 - ), - ( - "append_accounts", - self.stats.store_append_accounts.swap(0, Ordering::Relaxed), - i64 - ), - ( - "stakes_cache_check_and_store_us", - self.stats - 
.stakes_cache_check_and_store_us - .swap(0, Ordering::Relaxed), - i64 - ), - ( - "num_accounts", - self.stats.store_num_accounts.swap(0, Ordering::Relaxed), - i64 - ), - ( - "total_data", - self.stats.store_total_data.swap(0, Ordering::Relaxed), - i64 - ), - ( - "read_only_accounts_cache_entries", - self.read_only_accounts_cache.cache_len(), - i64 - ), - ( - "read_only_accounts_cache_data_size", - self.read_only_accounts_cache.data_size(), - i64 - ), - ("read_only_accounts_cache_hits", read_cache_stats.hits, i64), - ( - "read_only_accounts_cache_misses", - read_cache_stats.misses, - i64 - ), - ( - "read_only_accounts_cache_evicts", - read_cache_stats.evicts, - i64 - ), - ( - "read_only_accounts_cache_load_us", - read_cache_stats.load_us, - i64 - ), - ( - "read_only_accounts_cache_store_us", - read_cache_stats.store_us, - i64 - ), - ( - "read_only_accounts_cache_evict_us", - read_cache_stats.evict_us, - i64 - ), - ( - "read_only_accounts_cache_evictor_wakeup_count_all", - read_cache_stats.evictor_wakeup_count_all, - i64 - ), - ( - "read_only_accounts_cache_evictor_wakeup_count_productive", - read_cache_stats.evictor_wakeup_count_productive, - i64 - ), - ( - "handle_dead_keys_us", - self.stats.handle_dead_keys_us.swap(0, Ordering::Relaxed), - i64 - ), - ( - "purge_exact_us", - self.stats.purge_exact_us.swap(0, Ordering::Relaxed), - i64 - ), - ( - "purge_exact_count", - self.stats.purge_exact_count.swap(0, Ordering::Relaxed), - i64 - ), - ); - - datapoint_info!( - "accounts_db_store_timings2", - ( - "create_store_count", - self.stats.create_store_count.swap(0, Ordering::Relaxed), - i64 - ), - ( - "store_get_slot_store", - self.stats.store_get_slot_store.swap(0, Ordering::Relaxed), - i64 - ), - ( - "store_find_existing", - self.stats.store_find_existing.swap(0, Ordering::Relaxed), - i64 - ), - ( - "dropped_stores", - self.stats.dropped_stores.swap(0, Ordering::Relaxed), - i64 - ), - ); - } - } - - /// Stores accounts in the write cache and updates the index. 
- /// This should only be used for accounts that are unrooted (unfrozen) - fn store_accounts_unfrozen<'a>( + /// Stores accounts in the storage and updates the index. + /// This function is intended for accounts that are rooted (frozen). + /// - `UpsertReclaim` is set to `IgnoreReclaims`. If the slot in `accounts` differs from the new slot, + /// accounts may be removed from the account index. In such cases, the caller must ensure that alive + /// accounts are decremented for the older storage or that the old storage is removed entirely + pub fn store_accounts_frozen<'a>( &self, accounts: impl StorableAccounts<'a>, - transactions: Option<&'a [&'a SanitizedTransaction]>, + storage: &Arc, update_index_thread_selection: UpdateIndexThreadSelection, - ) { - let slot = accounts.target_slot(); - - // Store the accounts in the write cache - let mut store_accounts_time = Measure::start("store_accounts"); - let infos = self.write_accounts_to_cache(slot, &accounts, transactions); - store_accounts_time.stop(); - self.stats - .store_accounts - .fetch_add(store_accounts_time.as_us(), Ordering::Relaxed); - - // Update the index - let mut update_index_time = Measure::start("update_index"); - - self.update_index( - infos, - &accounts, - UpsertReclaim::PreviousSlotEntryWasCached, + ) -> StoreAccountsTiming { + self._store_accounts_frozen( + accounts, + storage, + UpsertReclaim::IgnoreReclaims, update_index_thread_selection, - &self.thread_pool, - ); - - update_index_time.stop(); - self.stats - .store_update_index - .fetch_add(update_index_time.as_us(), Ordering::Relaxed); - self.stats - .store_num_accounts - .fetch_add(accounts.len() as u64, Ordering::Relaxed); + ) } /// Stores accounts in the storage and updates the index. - /// This should only be used on accounts that are rooted (frozen) - pub fn store_accounts_frozen<'a>( + /// This function is intended for accounts that are rooted (frozen).
+ /// - `UpsertReclaim` must be set to `IgnoreReclaims` at this time + fn _store_accounts_frozen<'a>( &self, accounts: impl StorableAccounts<'a>, storage: &Arc, + reclaim_handling: UpsertReclaim, update_index_thread_selection: UpdateIndexThreadSelection, ) -> StoreAccountsTiming { let slot = accounts.target_slot(); let mut store_accounts_time = Measure::start("store_accounts"); - // Flush the read cache if neccessary. This will occur during shrink or clean + // Flush the read cache if necessary. This will occur during shrink or clean if self.read_only_accounts_cache.can_slot_be_in_cache(slot) { (0..accounts.len()).for_each(|index| { // based on the patterns of how a validator writes accounts, it is almost always the case that there is no read only cache entry @@ -6479,13 +5971,10 @@ impl AccountsDb { self.stats .store_accounts .fetch_add(store_accounts_time.as_us(), Ordering::Relaxed); - let mut update_index_time = Measure::start("update_index"); - let reclaim = UpsertReclaim::IgnoreReclaims; + self.mark_zero_lamport_single_ref_accounts(&infos, storage, reclaim_handling); - // if we are squashing a single slot, then we can expect a single dead slot - let expected_single_dead_slot = - (!accounts.contains_multiple_slots()).then(|| accounts.target_slot()); + let mut update_index_time = Measure::start("update_index"); // If the cache was flushed, then because `update_index` occurs // after the account are stored by the above `store_accounts_to` @@ -6494,9 +5983,9 @@ impl AccountsDb { let reclaims = self.update_index( infos, &accounts, - UpsertReclaim::IgnoreReclaims, + reclaim_handling, update_index_thread_selection, - &self.thread_pool_clean, + &self.thread_pool_background, ); update_index_time.stop(); @@ -6507,33 +5996,42 @@ impl AccountsDb { .store_num_accounts .fetch_add(accounts.len() as u64, Ordering::Relaxed); - // A store for a single slot should: - // 1) Only make "reclaims" for the same slot - // 2) Should not cause any slots to be removed from the storage -
// database because - // a) this slot has at least one account (the one being stored), - // b)From 1) we know no other slots are included in the "reclaims" - // - // From 1) and 2) we guarantee passing `no_purge_stats` == None, which is - // equivalent to asserting there will be no dead slots, is safe. + // If there are any reclaims then they should be handled. Reclaims affect + // all storages, and may result in the removal of dead storages. let mut handle_reclaims_elapsed = 0; - if reclaim == UpsertReclaim::PopulateReclaims { + + // since reclaims only contains non-empty SlotList, we + // should skip handle_reclaims only when reclaims is empty. No need to + // check the elements of reclaims are empty. + if !reclaims.is_empty() { + let reclaims_len = reclaims.iter().map(|r| r.len()).sum::(); + self.stats + .num_reclaims + .fetch_add(reclaims_len as u64, Ordering::Relaxed); + let purge_stats = PurgeStats::default(); let mut handle_reclaims_time = Measure::start("handle_reclaims"); self.handle_reclaims( - (!reclaims.is_empty()).then(|| reclaims.iter()), - expected_single_dead_slot, + reclaims.iter().flatten(), + None, &HashSet::default(), - // this callsite does NOT process dead slots - HandleReclaims::DoNotProcessDeadSlots, - MarkAccountsObsolete::No, + HandleReclaims::ProcessDeadSlots(&purge_stats), + MarkAccountsObsolete::Yes(slot), ); handle_reclaims_time.stop(); handle_reclaims_elapsed = handle_reclaims_time.as_us(); + self.stats.num_obsolete_slots_removed.fetch_add( + purge_stats.num_stored_slots_removed.load(Ordering::Relaxed), + Ordering::Relaxed, + ); + self.stats.num_obsolete_bytes_removed.fetch_add( + purge_stats + .total_removed_stored_bytes + .load(Ordering::Relaxed), + Ordering::Relaxed, + ); self.stats .store_handle_reclaims .fetch_add(handle_reclaims_elapsed, Ordering::Relaxed); - } else { - assert!(reclaims.is_empty()); } StoreAccountsTiming { @@ -6543,6 +6041,296 @@ impl AccountsDb { } } + fn write_accounts_to_cache<'a, 'b>( + &self, + slot: Slot, 
+ accounts_and_meta_to_store: &impl StorableAccounts<'b>, + txs: Option<&[&SanitizedTransaction]>, + ) -> Vec { + let mut current_write_version = if self.accounts_update_notifier.is_some() { + self.write_version + .fetch_add(accounts_and_meta_to_store.len() as u64, Ordering::AcqRel) + } else { + 0 + }; + + (0..accounts_and_meta_to_store.len()) + .map(|index| { + let txn = txs.map(|txs| *txs.get(index).expect("txs must be present if provided")); + accounts_and_meta_to_store.account_default_if_zero_lamport(index, |account| { + let account_shared_data = account.to_account_shared_data(); + let pubkey = account.pubkey(); + let account_info = + AccountInfo::new(StorageLocation::Cached, account.is_zero_lamport()); + + self.notify_account_at_accounts_update( + slot, + &account_shared_data, + &txn, + pubkey, + current_write_version, + ); + current_write_version = current_write_version.saturating_add(1); + + self.accounts_cache.store(slot, pubkey, account_shared_data); + account_info + }) + }) + .collect() + } + + fn write_accounts_to_storage<'a>( + &self, + slot: Slot, + storage: &AccountStorageEntry, + accounts_and_meta_to_store: &impl StorableAccounts<'a>, + ) -> Vec { + let mut infos: Vec = Vec::with_capacity(accounts_and_meta_to_store.len()); + let mut total_append_accounts_us = 0; + while infos.len() < accounts_and_meta_to_store.len() { + let mut append_accounts = Measure::start("append_accounts"); + let stored_accounts_info = storage + .accounts + .write_accounts(accounts_and_meta_to_store, infos.len()); + append_accounts.stop(); + total_append_accounts_us += append_accounts.as_us(); + let Some(stored_accounts_info) = stored_accounts_info else { + // See if an account overflows the storage in the slot. 
+ let data_len = accounts_and_meta_to_store.data_len(infos.len()); + let data_len = (data_len + STORE_META_OVERHEAD) as u64; + if data_len > storage.accounts.remaining_bytes() { + info!( + "write_accounts_to_storage, no space: {}, {}, {}, {}, {}", + storage.accounts.capacity(), + storage.accounts.remaining_bytes(), + data_len, + infos.len(), + accounts_and_meta_to_store.len() + ); + let special_store_size = std::cmp::max(data_len * 2, self.file_size); + self.create_and_insert_store(slot, special_store_size, "large create"); + } + continue; + }; + + let store_id = storage.id(); + for (i, offset) in stored_accounts_info.offsets.iter().enumerate() { + infos.push(AccountInfo::new( + StorageLocation::AppendVec(store_id, *offset), + accounts_and_meta_to_store.is_zero_lamport(i), + )); + } + storage.add_accounts( + stored_accounts_info.offsets.len(), + stored_accounts_info.size, + ); + } + + self.stats + .store_append_accounts + .fetch_add(total_append_accounts_us, Ordering::Relaxed); + + infos + } + + /// Marks zero lamport single reference accounts in the storage during store_accounts + fn mark_zero_lamport_single_ref_accounts( + &self, + account_infos: &[AccountInfo], + storage: &AccountStorageEntry, + reclaim_handling: UpsertReclaim, + ) { + // If the reclaim handling is `ReclaimOldSlots`, then all zero lamport accounts are single + // ref accounts and they need to be inserted into the storages zero lamport single ref + // accounts list + // For other values of reclaim handling, there are no zero lamport single ref accounts + // so nothing needs to be done in this function + if reclaim_handling == UpsertReclaim::ReclaimOldSlots { + let mut add_zero_lamport_accounts = Measure::start("add_zero_lamport_accounts"); + let mut num_zero_lamport_accounts_added = 0; + + for account_info in account_infos { + if account_info.is_zero_lamport() { + storage.insert_zero_lamport_single_ref_account_offset(account_info.offset()); + num_zero_lamport_accounts_added += 1; + } + } + + // 
If any zero lamport accounts were added, the storage may be valid for shrinking + if num_zero_lamport_accounts_added > 0 + && self.is_candidate_for_shrink(storage) + && Self::is_shrinking_productive(storage) + { + self.shrink_candidate_slots + .lock() + .unwrap() + .insert(storage.slot); + } + + add_zero_lamport_accounts.stop(); + self.stats + .add_zero_lamport_accounts_us + .fetch_add(add_zero_lamport_accounts.as_us(), Ordering::Relaxed); + self.stats + .num_zero_lamport_accounts_added + .fetch_add(num_zero_lamport_accounts_added, Ordering::Relaxed); + } + } + + fn report_store_timings(&self) { + if self.stats.last_store_report.should_update(1000) { + let read_cache_stats = self.read_only_accounts_cache.get_and_reset_stats(); + datapoint_info!( + "accounts_db_store_timings", + ( + "store_accounts", + self.stats.store_accounts.swap(0, Ordering::Relaxed), + i64 + ), + ( + "update_index", + self.stats.store_update_index.swap(0, Ordering::Relaxed), + i64 + ), + ( + "handle_reclaims", + self.stats.store_handle_reclaims.swap(0, Ordering::Relaxed), + i64 + ), + ( + "append_accounts", + self.stats.store_append_accounts.swap(0, Ordering::Relaxed), + i64 + ), + ( + "stakes_cache_check_and_store_us", + self.stats + .stakes_cache_check_and_store_us + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "num_accounts", + self.stats.store_num_accounts.swap(0, Ordering::Relaxed), + i64 + ), + ( + "total_data", + self.stats.store_total_data.swap(0, Ordering::Relaxed), + i64 + ), + ( + "num_reclaims", + self.stats.num_reclaims.swap(0, Ordering::Relaxed), + i64 + ), + ( + "read_only_accounts_cache_entries", + self.read_only_accounts_cache.cache_len(), + i64 + ), + ( + "read_only_accounts_cache_data_size", + self.read_only_accounts_cache.data_size(), + i64 + ), + ("read_only_accounts_cache_hits", read_cache_stats.hits, i64), + ( + "read_only_accounts_cache_misses", + read_cache_stats.misses, + i64 + ), + ( + "read_only_accounts_cache_evicts", + read_cache_stats.evicts, + i64 + ), + ( + 
"read_only_accounts_cache_load_us", + read_cache_stats.load_us, + i64 + ), + ( + "read_only_accounts_cache_store_us", + read_cache_stats.store_us, + i64 + ), + ( + "read_only_accounts_cache_evict_us", + read_cache_stats.evict_us, + i64 + ), + ( + "read_only_accounts_cache_evictor_wakeup_count_all", + read_cache_stats.evictor_wakeup_count_all, + i64 + ), + ( + "read_only_accounts_cache_evictor_wakeup_count_productive", + read_cache_stats.evictor_wakeup_count_productive, + i64 + ), + ( + "handle_dead_keys_us", + self.stats.handle_dead_keys_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "purge_exact_us", + self.stats.purge_exact_us.swap(0, Ordering::Relaxed), + i64 + ), + ( + "purge_exact_count", + self.stats.purge_exact_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "num_obsolete_slots_removed", + self.stats + .num_obsolete_slots_removed + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "num_obsolete_bytes_removed", + self.stats + .num_obsolete_bytes_removed + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "add_zero_lamport_accounts_us", + self.stats + .add_zero_lamport_accounts_us + .swap(0, Ordering::Relaxed), + i64 + ), + ( + "num_zero_lamport_accounts_added", + self.stats + .num_zero_lamport_accounts_added + .swap(0, Ordering::Relaxed), + i64 + ), + ); + + datapoint_info!( + "accounts_db_store_timings2", + ( + "create_store_count", + self.stats.create_store_count.swap(0, Ordering::Relaxed), + i64 + ), + ( + "dropped_stores", + self.stats.dropped_stores.swap(0, Ordering::Relaxed), + i64 + ), + ); + } + } + pub fn add_root(&self, slot: Slot) -> AccountsAddRootTiming { let mut index_time = Measure::start("index_add_root"); self.accounts_index.add_root(slot); @@ -6550,22 +6338,10 @@ impl AccountsDb { let mut cache_time = Measure::start("cache_add_root"); self.accounts_cache.add_root(slot); cache_time.stop(); - let mut store_time = Measure::start("store_add_root"); - // We would not expect this slot to be shrinking right now, but other slots may be. 
- // But, even if it was, we would just mark a store id as dirty unnecessarily and that is ok. - // So, allow shrinking to be in progress. - if let Some(store) = self - .storage - .get_slot_storage_entry_shrinking_in_progress_ok(slot) - { - self.dirty_stores.insert(slot, store); - } - store_time.stop(); AccountsAddRootTiming { index_us: index_time.as_us(), cache_us: cache_time.as_us(), - store_us: store_time.as_us(), } } @@ -6596,9 +6372,10 @@ impl AccountsDb { *self.latest_full_snapshot_slot.lock_write() = Some(slot); } - fn generate_index_for_slot( + fn generate_index_for_slot<'a>( &self, - storage: &AccountStorageEntry, + reader: &mut impl RequiredLenBufFileRead<'a>, + storage: &'a AccountStorageEntry, slot: Slot, store_id: AccountsFileId, storage_info: &StorageSizeAndCountMap, @@ -6606,86 +6383,97 @@ impl AccountsDb { if storage.accounts.get_account_data_lens(&[0]).is_empty() { return SlotIndexGenerationInfo::default(); } - let secondary = !self.account_indexes.is_empty(); let mut accounts_data_len = 0; let mut stored_size_alive = 0; let mut zero_lamport_pubkeys = vec![]; + let mut zero_lamport_offsets = vec![]; let mut all_accounts_are_zero_lamports = true; + let mut slot_lt_hash = SlotLtHash::default(); + let mut keyed_account_infos = vec![]; + + let geyser_notifier = self + .accounts_update_notifier + .as_ref() + .filter(|notifier| notifier.snapshot_notifications_enabled()); + + // If geyser notifications at startup from snapshot are enabled, we need to pass in a + // write version for each account notification. This value does not need to be + // globally unique, as geyser plugins also receive the slot number. We only need to + // ensure that more recent accounts have a higher write version than older accounts. + // Even more relaxed, we really only need to have different write versions if there are + // multiple versions of the same account in a single storage, which is not allowed. 
+ // + // Since we scan the storage from oldest to newest, we can simply increment a local + // counter per account and use that for the write version. + let mut write_version_for_geyser = 0; - let (insert_time_us, generate_index_results) = { - let mut keyed_account_infos = vec![]; - // this closure is the shared code when scanning the storage - let mut itemizer = |info: IndexInfo| { - stored_size_alive += info.stored_size_aligned; - if info.index_info.lamports > 0 { - accounts_data_len += info.index_info.data_len; + storage + .accounts + .scan_accounts(reader, |offset, account| { + let data_len = account.data.len(); + stored_size_alive += storage.accounts.calculate_stored_size(data_len); + let is_account_zero_lamport = account.is_zero_lamport(); + if !is_account_zero_lamport { + accounts_data_len += data_len as u64; all_accounts_are_zero_lamports = false; } else { - // zero lamport accounts - zero_lamport_pubkeys.push(info.index_info.pubkey); + // With obsolete accounts enabled, all zero lamport accounts + // are obsolete or single ref by the end of index generation + // Store the offsets here + if self.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled { + zero_lamport_offsets.push(offset); + } + zero_lamport_pubkeys.push(*account.pubkey); } keyed_account_infos.push(( - info.index_info.pubkey, + *account.pubkey, AccountInfo::new( - StorageLocation::AppendVec(store_id, info.index_info.offset), // will never be cached - info.index_info.is_zero_lamport(), + StorageLocation::AppendVec(store_id, offset), // will never be cached + is_account_zero_lamport, ), )); - }; - if secondary { - // WITH secondary indexes -- scan accounts WITH account data - storage.accounts.scan_accounts(|offset, account| { - let data_len = account.data.len() as u64; - let stored_size_aligned = - storage.accounts.calculate_stored_size(data_len as usize); - let info = IndexInfo { - stored_size_aligned, - index_info: IndexInfoInner { - offset, - pubkey: *account.pubkey, - lamports: 
account.lamports, - data_len, - }, - }; - itemizer(info); + if !self.account_indexes.is_empty() { self.accounts_index.update_secondary_indexes( account.pubkey, &account, &self.account_indexes, ); - }) - } else { - // withOUT secondary indexes -- scan accounts withOUT account data - storage - .accounts - .scan_accounts_without_data(|offset, account| { - let data_len = account.data_len as u64; - let stored_size_aligned = - storage.accounts.calculate_stored_size(data_len as usize); - let info = IndexInfo { - stored_size_aligned, - index_info: IndexInfoInner { - offset, - pubkey: *account.pubkey, - lamports: account.lamports, - data_len, - }, - }; - itemizer(info); - }) - } + } + + let account_lt_hash = Self::lt_hash_account(&account, account.pubkey()); + slot_lt_hash.0.mix_in(&account_lt_hash.0); + + if let Some(geyser_notifier) = geyser_notifier { + debug_assert!(geyser_notifier.snapshot_notifications_enabled()); + let account_for_geyser = AccountForGeyser { + pubkey: account.pubkey(), + lamports: account.lamports(), + owner: account.owner(), + executable: account.executable(), + rent_epoch: account.rent_epoch(), + data: account.data(), + }; + geyser_notifier.notify_account_restore_from_snapshot( + slot, + write_version_for_geyser, + &account_for_geyser, + ); + write_version_for_geyser += 1; + } + }) .expect("must scan accounts storage"); - self.accounts_index - .insert_new_if_missing_into_primary_index(slot, keyed_account_infos) - }; + + let (insert_time_us, insert_info) = self + .accounts_index + .insert_new_if_missing_into_primary_index(slot, keyed_account_infos); { // second, collect into the shared DashMap once we've figured out all the info per store_id let mut info = storage_info.entry(store_id).or_default(); info.stored_size += stored_size_alive; - info.count += generate_index_results.count; + info.count += insert_info.count; // sanity check that stored_size is not larger than the u64 aligned size of the accounts files. 
// Note that the stored_size is aligned, so it can be larger than the size of the accounts file. @@ -6707,15 +6495,25 @@ impl AccountsDb { .insert(slot, zero_lamport_pubkeys.clone()); assert!(old.is_none()); } + + // If obsolete accounts are enabled, add them as single ref accounts here + // to avoid having to revisit them later + // This is safe with obsolete accounts as all zero lamport accounts will be single ref + // or obsolete by the end of index generation + if self.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled { + storage.batch_insert_zero_lamport_single_ref_account_offsets(&zero_lamport_offsets); + zero_lamport_pubkeys = Vec::new(); + } SlotIndexGenerationInfo { insert_time_us, - num_accounts: generate_index_results.count as u64, + num_accounts: insert_info.count as u64, accounts_data_len, zero_lamport_pubkeys, all_accounts_are_zero_lamports, - num_did_not_exist: generate_index_results.num_did_not_exist, - num_existed_in_mem: generate_index_results.num_existed_in_mem, - num_existed_on_disk: generate_index_results.num_existed_on_disk, + num_did_not_exist: insert_info.num_did_not_exist, + num_existed_in_mem: insert_info.num_existed_in_mem, + num_existed_on_disk: insert_info.num_existed_on_disk, + slot_lt_hash, } } @@ -6725,395 +6523,390 @@ impl AccountsDb { verify: bool, ) -> IndexGenerationInfo { let mut total_time = Measure::start("generate_index"); - let mut slots = self.storage.all_slots(); - slots.sort_unstable(); + + let mut storages = self.storage.all_storages(); + storages.sort_unstable_by_key(|storage| storage.slot); if let Some(limit) = limit_load_slot_count_from_snapshot { - slots.truncate(limit); // get rid of the newer slots and keep just the older + storages.truncate(limit); // get rid of the newer slots and keep just the older } - let accounts_data_len = AtomicU64::new(0); + let num_storages = storages.len(); - let zero_lamport_pubkeys = Mutex::new(HashSet::new()); - let mut outer_duplicates_lt_hash = None; - - // pass == 0 always 
runs and generates the index - // pass == 1 only runs if verify == true. - // verify checks that all the expected items are in the accounts index and measures how long it takes to look them all up - let passes = if verify { 2 } else { 1 }; - for pass in 0..passes { - if pass == 0 { - self.accounts_index - .set_startup(Startup::StartupWithExtraThreads); + self.accounts_index + .set_startup(Startup::StartupWithExtraThreads); + let storage_info = StorageSizeAndCountMap::default(); + + /// Accumulator for the values produced while generating the index + #[derive(Debug)] + struct IndexGenerationAccumulator { + insert_us: u64, + num_accounts: u64, + accounts_data_len: u64, + zero_lamport_pubkeys: Vec, + all_accounts_are_zero_lamports_slots: u64, + all_zeros_slots: Vec<(Slot, Arc)>, + num_did_not_exist: u64, + num_existed_in_mem: u64, + num_existed_on_disk: u64, + lt_hash: LtHash, + } + impl IndexGenerationAccumulator { + const fn new() -> Self { + Self { + insert_us: 0, + num_accounts: 0, + accounts_data_len: 0, + zero_lamport_pubkeys: Vec::new(), + all_accounts_are_zero_lamports_slots: 0, + all_zeros_slots: Vec::new(), + num_did_not_exist: 0, + num_existed_in_mem: 0, + num_existed_on_disk: 0, + lt_hash: LtHash::identity(), + } } - let storage_info = StorageSizeAndCountMap::default(); - let total_processed_slots_across_all_threads = AtomicU64::new(0); - let outer_slots_len = slots.len(); - let threads = num_cpus::get(); - let chunk_size = (outer_slots_len / (std::cmp::max(1, threads.saturating_sub(1)))) + 1; // approximately 400k slots in a snapshot - let mut index_time = Measure::start("index"); - let insertion_time_us = AtomicU64::new(0); - let total_including_duplicates = AtomicU64::new(0); - let all_accounts_are_zero_lamports_slots = AtomicU64::new(0); - let mut all_zeros_slots = Mutex::new(Vec::<(Slot, Arc)>::new()); - let scan_time: u64 = slots - .par_chunks(chunk_size) - .map(|slots| { - let mut log_status = MultiThreadProgress::new( - 
&total_processed_slots_across_all_threads, - 2, - outer_slots_len as u64, - ); - let mut scan_time_sum = 0; - let mut all_accounts_are_zero_lamports_slots_inner = 0; - let mut all_zeros_slots_inner = vec![]; - let mut local_zero_lamport_pubkeys = Vec::new(); - let mut insert_time_sum = 0; - let mut total_including_duplicates_sum = 0; - let mut accounts_data_len_sum = 0; - let mut local_num_did_not_exist = 0; - let mut local_num_existed_in_mem = 0; - let mut local_num_existed_on_disk = 0; - for (index, slot) in slots.iter().enumerate() { - let mut scan_time = Measure::start("scan"); - log_status.report(index as u64); - let Some(storage) = self.storage.get_slot_storage_entry(*slot) else { - // no storage at this slot, no information to pull out - continue; - }; - let store_id = storage.id(); - - scan_time.stop(); - scan_time_sum += scan_time.as_us(); - - let insert_us = if pass == 0 { - // generate index - self.maybe_throttle_index_generation(); - let SlotIndexGenerationInfo { - insert_time_us: insert_us, - num_accounts: total_this_slot, - accounts_data_len: accounts_data_len_this_slot, - zero_lamport_pubkeys: mut zero_lamport_pubkeys_this_slot, - all_accounts_are_zero_lamports, - num_did_not_exist, - num_existed_in_mem, - num_existed_on_disk, - } = self.generate_index_for_slot( - &storage, - *slot, - store_id, - &storage_info, - ); + fn accumulate(&mut self, other: Self) { + self.insert_us += other.insert_us; + self.num_accounts += other.num_accounts; + self.accounts_data_len += other.accounts_data_len; + self.zero_lamport_pubkeys.extend(other.zero_lamport_pubkeys); + self.all_accounts_are_zero_lamports_slots += + other.all_accounts_are_zero_lamports_slots; + self.all_zeros_slots.extend(other.all_zeros_slots); + self.num_did_not_exist += other.num_did_not_exist; + self.num_existed_in_mem += other.num_existed_in_mem; + self.num_existed_on_disk += other.num_existed_on_disk; + self.lt_hash.mix_in(&other.lt_hash); + } + } - local_num_did_not_exist += num_did_not_exist; 
- local_num_existed_in_mem += num_existed_in_mem; - local_num_existed_on_disk += num_existed_on_disk; - total_including_duplicates_sum += total_this_slot; - accounts_data_len_sum += accounts_data_len_this_slot; - if all_accounts_are_zero_lamports { - all_accounts_are_zero_lamports_slots_inner += 1; - all_zeros_slots_inner.push((*slot, Arc::clone(&storage))); + let mut total_accum = IndexGenerationAccumulator::new(); + let storages_orderer = + AccountStoragesOrderer::with_random_order(&storages).into_concurrent_consumer(); + let exit_logger = AtomicBool::new(false); + let num_processed = AtomicU64::new(0); + let num_threads = num_cpus::get(); + let mut index_time = Measure::start("index"); + thread::scope(|s| { + let thread_handles = (0..num_threads) + .map(|i| { + thread::Builder::new() + .name(format!("solGenIndex{i:02}")) + .spawn_scoped(s, || { + let mut thread_accum = IndexGenerationAccumulator::new(); + let mut reader = append_vec::new_scan_accounts_reader(); + while let Some(next_item) = storages_orderer.next() { + self.maybe_throttle_index_generation(); + let storage = next_item.storage; + let store_id = storage.id(); + let slot = storage.slot(); + let slot_info = self.generate_index_for_slot( + &mut reader, + storage, + slot, + store_id, + &storage_info, + ); + thread_accum.insert_us += slot_info.insert_time_us; + thread_accum.num_accounts += slot_info.num_accounts; + thread_accum.accounts_data_len += slot_info.accounts_data_len; + thread_accum + .zero_lamport_pubkeys + .extend(slot_info.zero_lamport_pubkeys); + if slot_info.all_accounts_are_zero_lamports { + thread_accum.all_accounts_are_zero_lamports_slots += 1; + thread_accum.all_zeros_slots.push(( + slot, + Arc::clone(&storages[next_item.original_index]), + )); + } + thread_accum.num_did_not_exist += slot_info.num_did_not_exist; + thread_accum.num_existed_in_mem += slot_info.num_existed_in_mem; + thread_accum.num_existed_on_disk += slot_info.num_existed_on_disk; + 
thread_accum.lt_hash.mix_in(&slot_info.slot_lt_hash.0); + num_processed.fetch_add(1, Ordering::Relaxed); } - local_zero_lamport_pubkeys.append(&mut zero_lamport_pubkeys_this_slot); - - insert_us - } else { - // verify index matches expected and measure the time to get all items - assert!(verify); - let mut lookup_time = Measure::start("lookup_time"); - storage - .accounts - .scan_accounts_without_data(|offset, account| { - let key = account.pubkey(); - let index_entry = self.accounts_index.get_cloned(key).unwrap(); - let slot_list = index_entry.slot_list.read().unwrap(); - let mut count = 0; - for (slot2, account_info2) in slot_list.iter() { - if slot2 == slot { - count += 1; - let ai = AccountInfo::new( - StorageLocation::AppendVec(store_id, offset), // will never be cached - account.is_zero_lamport(), - ); - assert_eq!(&ai, account_info2); - } - } - assert_eq!(1, count); - }) - .expect("must scan accounts storage"); - lookup_time.stop(); - lookup_time.as_us() - }; - insert_time_sum += insert_us; - } - - if pass == 0 { - let mut zero_lamport_pubkeys_lock = zero_lamport_pubkeys.lock().unwrap(); - zero_lamport_pubkeys_lock.reserve(local_zero_lamport_pubkeys.len()); - zero_lamport_pubkeys_lock.extend(local_zero_lamport_pubkeys.into_iter()); - drop(zero_lamport_pubkeys_lock); - - // This thread has finished processing its chunk of slots. - // Update the index stats now. 
- let index_stats = self.accounts_index.bucket_map_holder_stats(); - - // stats for inserted entries that previously did *not* exist - index_stats.inc_insert_count(local_num_did_not_exist); - index_stats.add_mem_count(local_num_did_not_exist as usize); - - // stats for inserted entries that previous did exist *in-mem* - index_stats - .entries_from_mem - .fetch_add(local_num_existed_in_mem, Ordering::Relaxed); - index_stats - .updates_in_mem - .fetch_add(local_num_existed_in_mem, Ordering::Relaxed); - - // stats for inserted entries that previously did exist *on-disk* - index_stats.add_mem_count(local_num_existed_on_disk as usize); - index_stats - .entries_missing - .fetch_add(local_num_existed_on_disk, Ordering::Relaxed); - index_stats - .updates_in_mem - .fetch_add(local_num_existed_on_disk, Ordering::Relaxed); - } - - all_accounts_are_zero_lamports_slots.fetch_add( - all_accounts_are_zero_lamports_slots_inner, - Ordering::Relaxed, - ); - all_zeros_slots - .lock() - .unwrap() - .append(&mut all_zeros_slots_inner); - insertion_time_us.fetch_add(insert_time_sum, Ordering::Relaxed); - total_including_duplicates - .fetch_add(total_including_duplicates_sum, Ordering::Relaxed); - accounts_data_len.fetch_add(accounts_data_len_sum, Ordering::Relaxed); - scan_time_sum + thread_accum + }) }) - .sum(); - index_time.stop(); - - let mut index_flush_us = 0; - let total_duplicate_slot_keys = AtomicU64::default(); - let mut populate_duplicate_keys_us = 0; - let total_num_unique_duplicate_keys = AtomicU64::default(); - - // outer vec is accounts index bin (determined by pubkey value) - // inner vec is the pubkeys within that bin that are present in > 1 slot - let unique_pubkeys_by_bin = Mutex::new(Vec::>::default()); - if pass == 0 { - // tell accounts index we are done adding the initial accounts at startup - let mut m = Measure::start("accounts_index_idle_us"); - self.accounts_index.set_startup(Startup::Normal); - m.stop(); - index_flush_us = m.as_us(); - - 
populate_duplicate_keys_us = measure_us!({ - // this has to happen before visit_duplicate_pubkeys_during_startup below - // get duplicate keys from acct idx. We have to wait until we've finished flushing. - self.accounts_index - .populate_and_retrieve_duplicate_keys_from_startup(|slot_keys| { - total_duplicate_slot_keys - .fetch_add(slot_keys.len() as u64, Ordering::Relaxed); - let unique_keys = - HashSet::::from_iter(slot_keys.iter().map(|(_, key)| *key)); - for (slot, key) in slot_keys { - self.uncleaned_pubkeys.entry(slot).or_default().push(key); - } - let unique_pubkeys_by_bin_inner = - unique_keys.into_iter().collect::>(); - total_num_unique_duplicate_keys.fetch_add( - unique_pubkeys_by_bin_inner.len() as u64, - Ordering::Relaxed, + .collect::, _>>() + .expect("spawn threads"); + let logger_thread_handle = thread::Builder::new() + .name("solGenIndexLog".to_string()) + .spawn_scoped(s, || { + let mut last_update = Instant::now(); + loop { + if exit_logger.load(Ordering::Relaxed) { + break; + } + let num_processed = num_processed.load(Ordering::Relaxed); + if num_processed == num_storages as u64 { + info!("generating index: processed all slots"); + break; + } + let now = Instant::now(); + if now - last_update > Duration::from_secs(2) { + info!( + "generating index: processed {num_processed}/{num_storages} \ + slots..." 
); - // does not matter that this is not ordered by slot - unique_pubkeys_by_bin - .lock() - .unwrap() - .push(unique_pubkeys_by_bin_inner); - }); + last_update = now; + } + thread::sleep(Duration::from_millis(500)) + } }) - .1; + .expect("spawn thread"); + for thread_handle in thread_handles { + let Ok(thread_accum) = thread_handle.join() else { + exit_logger.store(true, Ordering::Relaxed); + panic!("index generation failed"); + }; + total_accum.accumulate(thread_accum); } - let unique_pubkeys_by_bin = unique_pubkeys_by_bin.into_inner().unwrap(); - - let mut timings = GenerateIndexTimings { - index_flush_us, - scan_time, - index_time: index_time.as_us(), - insertion_time_us: insertion_time_us.load(Ordering::Relaxed), - total_duplicate_slot_keys: total_duplicate_slot_keys.load(Ordering::Relaxed), - total_num_unique_duplicate_keys: total_num_unique_duplicate_keys - .load(Ordering::Relaxed), - populate_duplicate_keys_us, - total_including_duplicates: total_including_duplicates.load(Ordering::Relaxed), - total_slots: slots.len() as u64, - all_accounts_are_zero_lamports_slots: all_accounts_are_zero_lamports_slots - .load(Ordering::Relaxed), - ..GenerateIndexTimings::default() - }; + // Make sure to join the logger thread *after* the main threads. + // This way, if a main thread errors, we won't spin indefinitely + // waiting for the logger thread to finish (it never will). 
+ logger_thread_handle.join().expect("join thread"); + }); + index_time.stop(); - if pass == 0 { - #[derive(Debug, Default)] - struct DuplicatePubkeysVisitedInfo { - accounts_data_len_from_duplicates: u64, - num_duplicate_accounts: u64, - duplicates_lt_hash: Option>, - } - impl DuplicatePubkeysVisitedInfo { - fn reduce(mut self, other: Self) -> Self { - self.accounts_data_len_from_duplicates += - other.accounts_data_len_from_duplicates; - self.num_duplicate_accounts += other.num_duplicate_accounts; - - match ( - self.duplicates_lt_hash.is_some(), - other.duplicates_lt_hash.is_some(), - ) { - (true, true) => { - // SAFETY: We just checked that both values are Some - self.duplicates_lt_hash - .as_mut() - .unwrap() - .0 - .mix_in(&other.duplicates_lt_hash.as_ref().unwrap().0); - } - (true, false) => { - // nothing to do; `other` doesn't have a duplicates lt hash - } - (false, true) => { - // `self` doesn't have a duplicates lt hash, so pilfer from `other` - self.duplicates_lt_hash = other.duplicates_lt_hash; - } - (false, false) => { - // nothing to do; no duplicates lt hash at all + { + // Update the index stats now. 
+ let index_stats = self.accounts_index.bucket_map_holder_stats(); + + // stats for inserted entries that previously did *not* exist + index_stats.inc_insert_count(total_accum.num_did_not_exist); + index_stats.add_mem_count(total_accum.num_did_not_exist as usize); + + // stats for inserted entries that previous did exist *in-mem* + index_stats + .entries_from_mem + .fetch_add(total_accum.num_existed_in_mem, Ordering::Relaxed); + index_stats + .updates_in_mem + .fetch_add(total_accum.num_existed_in_mem, Ordering::Relaxed); + + // stats for inserted entries that previously did exist *on-disk* + index_stats.add_mem_count(total_accum.num_existed_on_disk as usize); + index_stats + .entries_missing + .fetch_add(total_accum.num_existed_on_disk, Ordering::Relaxed); + index_stats + .updates_in_mem + .fetch_add(total_accum.num_existed_on_disk, Ordering::Relaxed); + } + + if let Some(geyser_notifier) = &self.accounts_update_notifier { + // We've finished scanning all the storages, and have thus sent all the + // account notifications. Now, let the geyser plugins know we're done. + geyser_notifier.notify_end_of_restore_from_snapshot(); + } + + if verify { + info!("Verifying index..."); + let start = Instant::now(); + storages.par_iter().for_each(|storage| { + let store_id = storage.id(); + let slot = storage.slot(); + storage + .accounts + .scan_accounts_without_data(|offset, account| { + let key = account.pubkey(); + let index_entry = self.accounts_index.get_cloned(key).unwrap(); + let slot_list = index_entry.slot_list.read().unwrap(); + let mut count = 0; + for (slot2, account_info2) in slot_list.iter() { + if *slot2 == slot { + count += 1; + let ai = AccountInfo::new( + StorageLocation::AppendVec(store_id, offset), // will never be cached + account.is_zero_lamport(), + ); + assert_eq!(&ai, account_info2); } } - self + assert_eq!(1, count); + }) + .expect("must scan accounts storage"); + }); + info!("Verifying index... 
Done in {:?}", start.elapsed()); + } + + let total_duplicate_slot_keys = AtomicU64::default(); + let total_num_unique_duplicate_keys = AtomicU64::default(); + + // outer vec is accounts index bin (determined by pubkey value) + // inner vec is the pubkeys within that bin that are present in > 1 slot + let unique_pubkeys_by_bin = Mutex::new(Vec::>::default()); + // tell accounts index we are done adding the initial accounts at startup + let mut m = Measure::start("accounts_index_idle_us"); + self.accounts_index.set_startup(Startup::Normal); + m.stop(); + let index_flush_us = m.as_us(); + + let populate_duplicate_keys_us = measure_us!({ + // this has to happen before visit_duplicate_pubkeys_during_startup below + // get duplicate keys from acct idx. We have to wait until we've finished flushing. + self.accounts_index + .populate_and_retrieve_duplicate_keys_from_startup(|slot_keys| { + total_duplicate_slot_keys.fetch_add(slot_keys.len() as u64, Ordering::Relaxed); + let unique_keys = + HashSet::::from_iter(slot_keys.iter().map(|(_, key)| *key)); + for (slot, key) in slot_keys { + self.uncleaned_pubkeys.entry(slot).or_default().push(key); } - } + let unique_pubkeys_by_bin_inner = unique_keys.into_iter().collect::>(); + total_num_unique_duplicate_keys + .fetch_add(unique_pubkeys_by_bin_inner.len() as u64, Ordering::Relaxed); + // does not matter that this is not ordered by slot + unique_pubkeys_by_bin + .lock() + .unwrap() + .push(unique_pubkeys_by_bin_inner); + }); + }) + .1; + let unique_pubkeys_by_bin = unique_pubkeys_by_bin.into_inner().unwrap(); + + let mut timings = GenerateIndexTimings { + index_flush_us, + scan_time: 0, + index_time: index_time.as_us(), + insertion_time_us: total_accum.insert_us, + total_duplicate_slot_keys: total_duplicate_slot_keys.load(Ordering::Relaxed), + total_num_unique_duplicate_keys: total_num_unique_duplicate_keys + .load(Ordering::Relaxed), + populate_duplicate_keys_us, + total_including_duplicates: total_accum.num_accounts, + 
total_slots: num_storages as u64, + all_accounts_are_zero_lamports_slots: total_accum.all_accounts_are_zero_lamports_slots, + ..GenerateIndexTimings::default() + }; - let zero_lamport_pubkeys_to_visit = - std::mem::take(&mut *zero_lamport_pubkeys.lock().unwrap()); - let (num_zero_lamport_single_refs, visit_zero_lamports_us) = - measure_us!(self - .visit_zero_lamport_pubkeys_during_startup(&zero_lamport_pubkeys_to_visit)); - timings.visit_zero_lamports_us = visit_zero_lamports_us; - timings.num_zero_lamport_single_refs = num_zero_lamport_single_refs; - - // subtract data.len() from accounts_data_len for all old accounts that are in the index twice - let mut accounts_data_len_dedup_timer = - Measure::start("handle accounts data len duplicates"); - let DuplicatePubkeysVisitedInfo { - accounts_data_len_from_duplicates, - num_duplicate_accounts, - duplicates_lt_hash, - } = unique_pubkeys_by_bin - .par_iter() - .fold( - DuplicatePubkeysVisitedInfo::default, - |accum, pubkeys_by_bin| { - let intermediate = pubkeys_by_bin - .par_chunks(4096) - .fold(DuplicatePubkeysVisitedInfo::default, |accum, pubkeys| { - let ( - accounts_data_len_from_duplicates, - accounts_duplicates_num, - duplicates_lt_hash, - ) = self - .visit_duplicate_pubkeys_during_startup(pubkeys, &timings); - let intermediate = DuplicatePubkeysVisitedInfo { - accounts_data_len_from_duplicates, - num_duplicate_accounts: accounts_duplicates_num, - duplicates_lt_hash, - }; - DuplicatePubkeysVisitedInfo::reduce(accum, intermediate) - }) - .reduce( - DuplicatePubkeysVisitedInfo::default, - DuplicatePubkeysVisitedInfo::reduce, - ); + #[derive(Debug, Default)] + struct DuplicatePubkeysVisitedInfo { + accounts_data_len_from_duplicates: u64, + num_duplicate_accounts: u64, + duplicates_lt_hash: Box, + } + impl DuplicatePubkeysVisitedInfo { + fn reduce(mut self, other: Self) -> Self { + self.accounts_data_len_from_duplicates += other.accounts_data_len_from_duplicates; + self.num_duplicate_accounts += 
other.num_duplicate_accounts; + self.duplicates_lt_hash + .0 + .mix_in(&other.duplicates_lt_hash.0); + self + } + } + + let (num_zero_lamport_single_refs, visit_zero_lamports_us) = measure_us!( + self.visit_zero_lamport_pubkeys_during_startup(total_accum.zero_lamport_pubkeys) + ); + timings.visit_zero_lamports_us = visit_zero_lamports_us; + timings.num_zero_lamport_single_refs = num_zero_lamport_single_refs; + + // subtract data.len() from accounts_data_len for all old accounts that are in the index twice + let mut accounts_data_len_dedup_timer = + Measure::start("handle accounts data len duplicates"); + let DuplicatePubkeysVisitedInfo { + accounts_data_len_from_duplicates, + num_duplicate_accounts, + duplicates_lt_hash, + } = unique_pubkeys_by_bin + .par_iter() + .fold( + DuplicatePubkeysVisitedInfo::default, + |accum, pubkeys_by_bin| { + let intermediate = pubkeys_by_bin + .par_chunks(4096) + .fold(DuplicatePubkeysVisitedInfo::default, |accum, pubkeys| { + let ( + accounts_data_len_from_duplicates, + accounts_duplicates_num, + duplicates_lt_hash, + ) = self.visit_duplicate_pubkeys_during_startup(pubkeys); + let intermediate = DuplicatePubkeysVisitedInfo { + accounts_data_len_from_duplicates, + num_duplicate_accounts: accounts_duplicates_num, + duplicates_lt_hash, + }; DuplicatePubkeysVisitedInfo::reduce(accum, intermediate) - }, - ) - .reduce( - DuplicatePubkeysVisitedInfo::default, - DuplicatePubkeysVisitedInfo::reduce, - ); - accounts_data_len_dedup_timer.stop(); - timings.accounts_data_len_dedup_time_us = accounts_data_len_dedup_timer.as_us(); - timings.num_duplicate_accounts = num_duplicate_accounts; - - accounts_data_len.fetch_sub(accounts_data_len_from_duplicates, Ordering::Relaxed); - if let Some(duplicates_lt_hash) = duplicates_lt_hash { - let old_val = outer_duplicates_lt_hash.replace(duplicates_lt_hash); - assert!(old_val.is_none()); - } - info!( - "accounts data len: {}", - accounts_data_len.load(Ordering::Relaxed) - ); + }) + .reduce( + 
DuplicatePubkeysVisitedInfo::default, + DuplicatePubkeysVisitedInfo::reduce, + ); + DuplicatePubkeysVisitedInfo::reduce(accum, intermediate) + }, + ) + .reduce( + DuplicatePubkeysVisitedInfo::default, + DuplicatePubkeysVisitedInfo::reduce, + ); + accounts_data_len_dedup_timer.stop(); + timings.accounts_data_len_dedup_time_us = accounts_data_len_dedup_timer.as_us(); + timings.num_duplicate_accounts = num_duplicate_accounts; - // insert all zero lamport account storage into the dirty stores and add them into the uncleaned roots for clean to pick up - let all_zero_slots_to_clean = std::mem::take(all_zeros_slots.get_mut().unwrap()); - info!( - "insert all zero slots to clean at startup {}", - all_zero_slots_to_clean.len() - ); - for (slot, storage) in all_zero_slots_to_clean { - self.dirty_stores.insert(slot, storage); - } - } + total_accum.lt_hash.mix_out(&duplicates_lt_hash.0); + total_accum.accounts_data_len -= accounts_data_len_from_duplicates; + info!("accounts data len: {}", total_accum.accounts_data_len); - if pass == 0 { - // Need to add these last, otherwise older updates will be cleaned - for root in &slots { - self.accounts_index.add_root(*root); - } + // insert all zero lamport account storage into the dirty stores and add them into the uncleaned roots for clean to pick up + info!( + "insert all zero slots to clean at startup {}", + total_accum.all_zeros_slots.len() + ); + for (slot, storage) in total_accum.all_zeros_slots { + self.dirty_stores.insert(slot, storage); + } - self.set_storage_count_and_alive_bytes(storage_info, &mut timings); - - if self.mark_obsolete_accounts { - let mut mark_obsolete_accounts_time = - Measure::start("mark_obsolete_accounts_time"); - // Mark all reclaims at max_slot. This is safe because only the snapshot paths care about - // this information. 
Since this account was just restored from the previous snapshot and - // it is known that it was already obsolete at that time, it must hold true that it will - // still be obsolete if a newer snapshot is created, since a newer snapshot will always - // be performed on a slot greater than the current slot - let slot_marked_obsolete = slots.last().copied().unwrap(); - let obsolete_account_stats = self.mark_obsolete_accounts_at_startup( - slot_marked_obsolete, - unique_pubkeys_by_bin, - ); + // Need to add these last, otherwise older updates will be cleaned + for storage in &storages { + self.accounts_index.add_root(storage.slot()); + } - mark_obsolete_accounts_time.stop(); - timings.mark_obsolete_accounts_us = mark_obsolete_accounts_time.as_us(); - timings.num_obsolete_accounts_marked = - obsolete_account_stats.accounts_marked_obsolete; - timings.num_slots_removed_as_obsolete = obsolete_account_stats.slots_removed; - } - } - total_time.stop(); - timings.total_time_us = total_time.as_us(); - timings.report(self.accounts_index.get_startup_stats()); + self.set_storage_count_and_alive_bytes(storage_info, &mut timings); + + if self.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled { + let mut mark_obsolete_accounts_time = Measure::start("mark_obsolete_accounts_time"); + // Mark all reclaims at max_slot. This is safe because only the snapshot paths care about + // this information. 
Since this account was just restored from the previous snapshot and + // it is known that it was already obsolete at that time, it must hold true that it will + // still be obsolete if a newer snapshot is created, since a newer snapshot will always + // be performed on a slot greater than the current slot + let slot_marked_obsolete = storages.last().unwrap().slot(); + let obsolete_account_stats = + self.mark_obsolete_accounts_at_startup(slot_marked_obsolete, unique_pubkeys_by_bin); + + mark_obsolete_accounts_time.stop(); + timings.mark_obsolete_accounts_us = mark_obsolete_accounts_time.as_us(); + timings.num_obsolete_accounts_marked = obsolete_account_stats.accounts_marked_obsolete; + timings.num_slots_removed_as_obsolete = obsolete_account_stats.slots_removed; } + total_time.stop(); + timings.total_time_us = total_time.as_us(); + timings.report(self.accounts_index.get_startup_stats()); self.accounts_index.log_secondary_indexes(); - // The duplicates lt hash must be Some if populate_duplicates_lt_hash is true. - // But, if there were no duplicates or obsolete accounts marking removed all - // duplicates, then we'd never set outer_duplicates_lt_hash to Some! So do one - // last check here to ensure outer_duplicates_lt_hash is Some if we're supposed - // to calculate the duplicates lt hash. - if outer_duplicates_lt_hash.is_none() { - outer_duplicates_lt_hash = Some(Box::new(DuplicatesLtHash::default())); - } + // Now that the index is generated, get the total capacity of the in-mem maps + // across all the bins and set the initial value for the stat. + // We do this all at once, at the end, since getting the capacity requries iterating all + // the bins and grabbing a read lock, which we try to avoid whenever possible. 
+ let index_capacity = self + .accounts_index + .account_maps + .iter() + .map(|bin| bin.capacity_for_startup()) + .sum(); + self.accounts_index + .bucket_map_holder_stats() + .capacity_in_mem + .store(index_capacity, Ordering::Relaxed); IndexGenerationInfo { - accounts_data_len: accounts_data_len.load(Ordering::Relaxed), - duplicates_lt_hash: outer_duplicates_lt_hash, + accounts_data_len: total_accum.accounts_data_len, + calculated_accounts_lt_hash: AccountsLtHash(total_accum.lt_hash), } } @@ -7127,30 +6920,21 @@ impl AccountsDb { let stats: ObsoleteAccountsStats = pubkeys_with_duplicates_by_bin .par_iter() .map(|pubkeys_by_bin| { - let reclaims = self.accounts_index.clean_and_unref_rooted_entries_by_bin( - pubkeys_by_bin, - |slot, account_info| { - // Since the unref makes every account a single ref account, all - // zero lamport accounts should be tracked as zero_lamport_single_ref - if account_info.is_zero_lamport() { - self.zero_lamport_single_ref_found(slot, account_info.offset()); - } - }, - ); + let reclaims = self + .accounts_index + .clean_and_unref_rooted_entries_by_bin(pubkeys_by_bin); let stats = PurgeStats::default(); - // Convert from a vector to a hashset for use in reclaims - let pubkeys_removed_from_accounts_index: PubkeysRemovedFromAccountsIndex = - pubkeys_by_bin.iter().cloned().collect(); - // Mark all the entries as obsolete, and remove any empty storages - self.handle_reclaims( - (!reclaims.is_empty()).then(|| reclaims.iter()), - None, - &pubkeys_removed_from_accounts_index, - HandleReclaims::ProcessDeadSlots(&stats), - MarkAccountsObsolete::Yes(slot_marked_obsolete), - ); + if !reclaims.is_empty() { + self.handle_reclaims( + reclaims.iter(), + None, + &HashSet::new(), + HandleReclaims::ProcessDeadSlots(&stats), + MarkAccountsObsolete::Yes(slot_marked_obsolete), + ); + } ObsoleteAccountsStats { accounts_marked_obsolete: reclaims.len() as u64, slots_removed: stats.total_removed_storage_entries.load(Ordering::Relaxed) @@ -7169,7 +6953,7 @@ impl 
AccountsDb { return; } // This number is chosen to keep the initial ram usage sufficiently small - // The process of generating the index is goverened entirely by how fast the disk index can be populated. + // The process of generating the index is governed entirely by how fast the disk index can be populated. // 10M accounts is sufficiently small that it will never have memory usage. It seems sufficiently large that it will provide sufficient performance. // Performance is measured by total time to generate the index. // Just estimating - 150M accounts can easily be held in memory in the accounts index on a 256G machine. 2-300M are also likely 'fine' during startup. @@ -7190,8 +6974,19 @@ impl AccountsDb { /// Visit zero lamport pubkeys and populate zero_lamport_single_ref info on /// storage. /// Returns the number of zero lamport single ref accounts found. - fn visit_zero_lamport_pubkeys_during_startup(&self, pubkeys: &HashSet) -> u64 { - let mut count = 0; + fn visit_zero_lamport_pubkeys_during_startup(&self, mut pubkeys: Vec) -> u64 { + let mut slot_offsets = HashMap::<_, Vec<_>>::default(); + // sort the pubkeys first so that in scan, the pubkeys are visited in + // index bucket in order. This helps to reduce the page faults and speed + // up the scan compared to visiting the pubkeys in random order. 
+ let orig_len = pubkeys.len(); + pubkeys.sort_unstable(); + pubkeys.dedup(); + let uniq_len = pubkeys.len(); + info!( + "visit_zero_lamport_pubkeys_during_startup: {orig_len} pubkeys, {uniq_len} after dedup", + ); + self.accounts_index.scan( pubkeys.iter(), |_pubkey, slots_refs, _entry| { @@ -7201,8 +6996,10 @@ impl AccountsDb { let (slot_alive, account_info) = slot_list.first().unwrap(); assert!(!account_info.is_cached()); if account_info.is_zero_lamport() { - count += 1; - self.zero_lamport_single_ref_found(*slot_alive, account_info.offset()); + slot_offsets + .entry(*slot_alive) + .or_default() + .push(account_info.offset()); } } AccountsIndexScanResult::OnlyKeepInMemoryIfDirty @@ -7211,6 +7008,46 @@ impl AccountsDb { false, ScanFilter::All, ); + + let mut count = 0; + let mut dead_stores = 0; + let mut shrink_stores = 0; + let mut non_shrink_stores = 0; + for (slot, offsets) in slot_offsets { + if let Some(store) = self.storage.get_slot_storage_entry(slot) { + count += store.batch_insert_zero_lamport_single_ref_account_offsets(&offsets); + if store.num_zero_lamport_single_ref_accounts() == store.count() { + // all accounts in this storage can be dead + self.dirty_stores.entry(slot).or_insert(store); + dead_stores += 1; + } else if Self::is_shrinking_productive(&store) + && self.is_candidate_for_shrink(&store) + { + // this store might be eligible for shrinking now + if self.shrink_candidate_slots.lock().unwrap().insert(slot) { + shrink_stores += 1; + } + } else { + non_shrink_stores += 1; + } + } + } + self.shrink_stats + .num_zero_lamport_single_ref_accounts_found + .fetch_add(count, Ordering::Relaxed); + + self.shrink_stats + .num_dead_slots_added_to_clean + .fetch_add(dead_stores, Ordering::Relaxed); + + self.shrink_stats + .num_slots_with_zero_lamport_accounts_added_to_shrink + .fetch_add(shrink_stores, Ordering::Relaxed); + + self.shrink_stats + .marking_zero_dead_accounts_in_non_shrinkable_store + .fetch_add(non_shrink_stores, Ordering::Relaxed); + count 
} @@ -7228,15 +7065,10 @@ impl AccountsDb { fn visit_duplicate_pubkeys_during_startup( &self, pubkeys: &[Pubkey], - timings: &GenerateIndexTimings, - ) -> (u64, u64, Option>) { + ) -> (u64, u64, Box) { let mut accounts_data_len_from_duplicates = 0; let mut num_duplicate_accounts = 0_u64; - // With obsolete accounts, the duplicates_lt_hash should NOT be created. - // And skip calculating the lt_hash from accounts too. - let mut duplicates_lt_hash = - (!self.mark_obsolete_accounts).then(|| Box::new(DuplicatesLtHash::default())); - let mut lt_hash_time = Duration::default(); + let mut duplicates_lt_hash = Box::new(DuplicatesLtHash::default()); self.accounts_index.scan( pubkeys.iter(), |pubkey, slots_refs, _entry| { @@ -7265,14 +7097,9 @@ impl AccountsDb { accounts_data_len_from_duplicates += data_len; } num_duplicate_accounts += 1; - if let Some(duplicates_lt_hash) = duplicates_lt_hash.as_mut() { - let (_, duration) = meas_dur!({ - let account_lt_hash = - Self::lt_hash_account(&loaded_account, pubkey); - duplicates_lt_hash.0.mix_in(&account_lt_hash.0); - }); - lt_hash_time += duration; - } + let account_lt_hash = + Self::lt_hash_account(&loaded_account, pubkey); + duplicates_lt_hash.0.mix_in(&account_lt_hash.0); }); }); } @@ -7283,9 +7110,6 @@ impl AccountsDb { false, ScanFilter::All, ); - timings - .par_duplicates_lt_hash_us - .fetch_add(lt_hash_time.as_micros() as u64, Ordering::Relaxed); ( accounts_data_len_from_duplicates as u64, num_duplicate_accounts, @@ -7312,16 +7136,15 @@ impl AccountsDb { store.count(), ); { - let mut count_and_status = store.count_and_status.lock_write(); - assert_eq!(count_and_status.0, 0); - count_and_status.0 = entry.count; + let prev_count = store.count.swap(entry.count, Ordering::Release); + assert_eq!(prev_count, 0); } store .alive_bytes .store(entry.stored_size, Ordering::Release); } else { trace!("id: {id} clearing count"); - store.count_and_status.lock_write().0 = 0; + store.count.store(0, Ordering::Release); } } 
storage_size_storages_time.stop(); @@ -7361,10 +7184,10 @@ impl AccountsDb { for slot in &slots { let entry = self.storage.get_slot_storage_entry(*slot).unwrap(); info!( - " slot: {} id: {} count_and_status: {:?} len: {} capacity: {}", + " slot: {} id: {} count: {} len: {} capacity: {}", slot, entry.id(), - entry.count_and_status.read(), + entry.count(), entry.accounts.len(), entry.accounts.capacity(), ); @@ -7375,15 +7198,13 @@ impl AccountsDb { #[derive(Debug, Copy, Clone)] enum HandleReclaims<'a> { ProcessDeadSlots(&'a PurgeStats), - DoNotProcessDeadSlots, } /// Specify whether obsolete accounts should be marked or not during reclaims /// They should only be marked if they are also getting unreffed in the index -/// Temporariliy allow dead code until the feature is implemented -#[derive(Debug, Copy, Clone)] +/// Temporarily allow dead code until the feature is implemented +#[derive(Debug, Copy, Clone, PartialEq, Eq)] enum MarkAccountsObsolete { - #[allow(dead_code)] Yes(Slot), No, } @@ -7412,12 +7233,63 @@ impl AccountStorageEntry { // These functions/fields are only usable from a dev context (i.e. 
tests and benches) #[cfg(feature = "dev-context-only-utils")] impl AccountsDb { + pub fn default_for_tests() -> Self { + Self::new_single_for_tests() + } + + pub fn new_single_for_tests() -> Self { + AccountsDb::new_for_tests(Vec::new()) + } + + pub fn new_single_for_tests_with_provider_and_config( + file_provider: AccountsFileProvider, + accounts_db_config: AccountsDbConfig, + ) -> Self { + AccountsDb::new_for_tests_with_provider_and_config( + Vec::new(), + file_provider, + accounts_db_config, + ) + } + + pub fn new_for_tests(paths: Vec) -> Self { + Self::new_for_tests_with_provider_and_config( + paths, + AccountsFileProvider::default(), + ACCOUNTS_DB_CONFIG_FOR_TESTING, + ) + } + + fn new_for_tests_with_provider_and_config( + paths: Vec, + accounts_file_provider: AccountsFileProvider, + accounts_db_config: AccountsDbConfig, + ) -> Self { + let mut db = AccountsDb::new_with_config(paths, accounts_db_config, None, Arc::default()); + db.accounts_file_provider = accounts_file_provider; + db + } + /// Return the number of slots marked with uncleaned pubkeys. - /// This is useful for testing clean aglorithms. + /// This is useful for testing clean algorithms. pub fn get_len_of_slots_with_uncleaned_pubkeys(&self) -> usize { self.uncleaned_pubkeys.len() } + #[cfg(test)] + pub fn storage_access(&self) -> StorageAccess { + self.storage_access + } + + /// Call clean_accounts() with the common parameters that tests/benches use. + pub fn clean_accounts_for_tests(&self) { + self.clean_accounts(None, false, &EpochSchedule::default()) + } + + pub fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) { + self.flush_slot_cache(slot); + } + /// useful to adapt tests written prior to introduction of the write cache /// to use the write cache pub fn add_root_and_flush_write_cache(&self, slot: Slot) { @@ -7469,10 +7341,28 @@ impl AccountsDb { } } + /// Iterate over all accounts from all `storages` and call `callback` with each account. 
+ /// + /// `callback` parameters: + /// * Offset: the offset within the file of this account + /// * StoredAccountInfo: the account itself, with account data + pub fn scan_accounts_from_storages( + storages: &[Arc], + mut callback: impl for<'local> FnMut(Offset, StoredAccountInfo<'local>), + ) { + let mut reader = append_vec::new_scan_accounts_reader(); + for storage in storages { + storage + .accounts + .scan_accounts(&mut reader, &mut callback) + .expect("must scan accounts storage"); + } + } + /// callers used to call store_uncached. But, this is not allowed anymore. - pub fn store_for_tests(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) { - self.store( - (slot, accounts), + pub fn store_for_tests<'a>(&self, accounts: impl StorableAccounts<'a>) { + self.store_accounts_unfrozen( + accounts, None, UpdateIndexThreadSelection::PoolWithThreshold, ); @@ -7486,13 +7376,12 @@ impl AccountsDb { 0, AccountSharedData::default().owner(), ); - self.store_for_tests(slot, &[(&pubkeys[idx], &account)]); + self.store_for_tests((slot, [(&pubkeys[idx], &account)].as_slice())); } } pub fn check_storage(&self, slot: Slot, alive_count: usize, total_count: usize) { let store = self.storage.get_slot_storage_entry(slot).unwrap(); - assert_eq!(store.status(), AccountStorageStatus::Available); assert_eq!(store.count(), alive_count); assert_eq!(store.accounts_count(), total_count); } @@ -7512,7 +7401,7 @@ impl AccountsDb { AccountSharedData::new((t + 1) as u64, space, AccountSharedData::default().owner()); pubkeys.push(pubkey); assert!(self.load_without_fixed_root(&ancestors, &pubkey).is_none()); - self.store_for_tests(slot, &[(&pubkey, &account)]); + self.store_for_tests((slot, [(&pubkey, &account)].as_slice())); } for t in 0..num_vote { let pubkey = solana_pubkey::new_rand(); @@ -7521,7 +7410,7 @@ impl AccountsDb { pubkeys.push(pubkey); let ancestors = vec![(slot, 0)].into_iter().collect(); assert!(self.load_without_fixed_root(&ancestors, &pubkey).is_none()); - 
self.store_for_tests(slot, &[(&pubkey, &account)]); + self.store_for_tests((slot, [(&pubkey, &account)].as_slice())); } } @@ -7538,8 +7427,20 @@ impl AccountsDb { sizes } - pub fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount { - self.accounts_index.ref_count_from_storage(pubkey) + // With obsolete accounts marked, obsolete references are marked in the storage + // and no longer need to be referenced. This leads to a static reference count + // of 1. As referencing checking is common in tests, this test wrapper abstracts the behavior + pub fn assert_ref_count(&self, pubkey: &Pubkey, expected_ref_count: RefCount) { + let expected_ref_count = match self.mark_obsolete_accounts { + MarkObsoleteAccounts::Disabled => expected_ref_count, + // When obsolete accounts are marked, the ref count is always 1 or 0 + MarkObsoleteAccounts::Enabled => expected_ref_count.min(1), + }; + + assert_eq!( + expected_ref_count, + self.accounts_index.ref_count_from_storage(pubkey) + ); } pub fn alive_account_count_in_slot(&self, slot: Slot) -> usize { @@ -7583,39 +7484,3 @@ impl AccountsDb { &self.uncleaned_pubkeys } } - -/// A set of utility functions used for testing and benchmarking -#[cfg(feature = "dev-context-only-utils")] -pub mod test_utils { - use {super::*, crate::accounts::Accounts}; - - pub fn create_test_accounts( - accounts: &Accounts, - pubkeys: &mut Vec, - num: usize, - slot: Slot, - ) { - let data_size = 0; - - for t in 0..num { - let pubkey = solana_pubkey::new_rand(); - let account = AccountSharedData::new( - (t + 1) as u64, - data_size, - AccountSharedData::default().owner(), - ); - accounts.store_cached((slot, &[(&pubkey, &account)][..]), None); - pubkeys.push(pubkey); - } - } - - // Only used by bench, not safe to call otherwise accounts can conflict with the - // accounts cache! 
- pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) { - for pubkey in pubkeys { - let amount = thread_rng().gen_range(0..10); - let account = AccountSharedData::new(amount, 0, AccountSharedData::default().owner()); - accounts.store_cached((slot, &[(pubkey, &account)][..]), None); - } - } -} diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs index 61f076fb7359bb..11477b5d9939d7 100644 --- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs +++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs @@ -1,82 +1,9 @@ use { - crate::{ - accounts_db::{AccountStorageEntry, AccountsDb}, - accounts_update_notifier_interface::AccountsUpdateNotifierInterface, - }, - solana_account::AccountSharedData, - solana_clock::Slot, - solana_measure::meas_dur, - solana_metrics::*, - solana_pubkey::Pubkey, - solana_transaction::sanitized::SanitizedTransaction, - std::{ - cmp::Reverse, - ops::AddAssign, - time::{Duration, Instant}, - }, + crate::accounts_db::AccountsDb, solana_account::AccountSharedData, solana_clock::Slot, + solana_pubkey::Pubkey, solana_transaction::sanitized::SanitizedTransaction, }; -#[derive(Default)] -pub struct GeyserPluginNotifyAtSnapshotRestoreStats { - pub notified_accounts: usize, - pub elapsed_notifying: Duration, - pub total_pure_notify: Duration, -} - -impl GeyserPluginNotifyAtSnapshotRestoreStats { - pub fn report(&self) { - datapoint_info!( - "accountsdb_plugin_notify_account_restore_from_snapshot_summary", - ("notified_accounts", self.notified_accounts, i64), - ( - "elapsed_notifying_us", - self.elapsed_notifying.as_micros(), - i64 - ), - ( - "total_pure_notify_us", - self.total_pure_notify.as_micros(), - i64 - ), - ); - } -} - -impl AddAssign for GeyserPluginNotifyAtSnapshotRestoreStats { - fn add_assign(&mut self, other: Self) { - self.notified_accounts += other.notified_accounts; - self.elapsed_notifying += other.elapsed_notifying; - self.total_pure_notify 
+= other.total_pure_notify; - } -} - impl AccountsDb { - /// Notify the plugins of account data when AccountsDb is restored from a snapshot. - /// - /// Since accounts may have multiple versions in different slots, plugins must handle - /// deduplication by inspected the slot and write version of each account notification. - pub fn notify_account_restore_from_snapshot(&self) { - let Some(accounts_update_notifier) = &self.accounts_update_notifier else { - return; - }; - - let mut notify_stats = GeyserPluginNotifyAtSnapshotRestoreStats::default(); - if accounts_update_notifier.snapshot_notifications_enabled() { - let mut slots = self.storage.all_slots(); - slots.sort_unstable_by_key(|&slot| Reverse(slot)); - slots - .into_iter() - .filter_map(|slot| self.storage.get_slot_storage_entry(slot)) - .map(|storage| { - Self::notify_accounts_in_storage(accounts_update_notifier.as_ref(), &storage) - }) - .for_each(|stats| notify_stats += stats); - } - - accounts_update_notifier.notify_end_of_restore_from_snapshot(); - notify_stats.report(); - } - pub fn notify_account_at_accounts_update( &self, slot: Slot, @@ -95,45 +22,17 @@ impl AccountsDb { ); } } - - fn notify_accounts_in_storage( - notifier: &dyn AccountsUpdateNotifierInterface, - storage: &AccountStorageEntry, - ) -> GeyserPluginNotifyAtSnapshotRestoreStats { - let mut pure_notify_time = Duration::ZERO; - let mut i = 0; - let notifying_start = Instant::now(); - storage - .accounts - .scan_accounts_for_geyser(|account| { - i += 1; - // later entries in the same slot are more recent and override earlier accounts for the same pubkey - // We can pass an incrementing number here for write_version in the future, if the storage does not have a write_version. - // As long as all accounts for this slot are in 1 append vec that can be iterated oldest to newest. 
- let (_, notify_dur) = meas_dur!(notifier.notify_account_restore_from_snapshot( - storage.slot(), - i as u64, - &account - )); - pure_notify_time += notify_dur; - }) - .expect("must scan accounts storage"); - let notifying_time = notifying_start.elapsed(); - - GeyserPluginNotifyAtSnapshotRestoreStats { - notified_accounts: i, - elapsed_notifying: notifying_time, - total_pure_notify: pure_notify_time, - } - } } #[cfg(test)] pub mod tests { use { super::*, - crate::accounts_update_notifier_interface::{ - AccountForGeyser, AccountsUpdateNotifier, AccountsUpdateNotifierInterface, + crate::{ + accounts_db::{AccountsDbConfig, MarkObsoleteAccounts, ACCOUNTS_DB_CONFIG_FOR_TESTING}, + accounts_update_notifier_interface::{ + AccountForGeyser, AccountsUpdateNotifier, AccountsUpdateNotifierInterface, + }, }, dashmap::DashMap, solana_account::ReadableAccount as _, @@ -141,6 +40,7 @@ pub mod tests { atomic::{AtomicBool, Ordering}, Arc, }, + test_case::test_case, }; impl AccountsDb { @@ -195,9 +95,18 @@ pub mod tests { } } - #[test] - fn test_notify_account_restore_from_snapshot() { - let mut accounts = AccountsDb::new_single_for_tests(); + #[test_case(MarkObsoleteAccounts::Enabled)] + #[test_case(MarkObsoleteAccounts::Disabled)] + fn test_notify_account_restore_from_snapshot(mark_obsolete_accounts: MarkObsoleteAccounts) { + let mut accounts_db = AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + mark_obsolete_accounts, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ); let key1 = Pubkey::new_unique(); let key2 = Pubkey::new_unique(); let account = AccountSharedData::new(1, 0, &Pubkey::default()); @@ -206,31 +115,48 @@ pub mod tests { // Need to add root and flush write cache for each slot to ensure accounts are written // to correct slots. 
Cache flush can skip writes if accounts have already been written to // a newer slot - accounts.store_for_tests(0, &[(&key1, &account)]); - accounts.add_root_and_flush_write_cache(0); - accounts.store_for_tests(1, &[(&key1, &account)]); - accounts.add_root_and_flush_write_cache(1); + let slot0 = 0; + let storage0 = accounts_db.create_and_insert_store(slot0, /*size*/ 4_096, ""); + storage0 + .accounts + .write_accounts(&(slot0, [(&key1, &account)].as_slice()), /*skip*/ 0); + + let slot1 = 1; + let storage1 = accounts_db.create_and_insert_store(slot1, /*size*/ 4_096, ""); + storage1 + .accounts + .write_accounts(&(slot1, [(&key1, &account)].as_slice()), /*skip*/ 0); // Account with key2 is updated in a single slot, should get notified once - accounts.store_for_tests(2, &[(&key2, &account)]); - accounts.add_root_and_flush_write_cache(2); + let slot2 = 2; + let storage2 = accounts_db.create_and_insert_store(slot2, /*size*/ 4_096, ""); + storage2 + .accounts + .write_accounts(&(slot2, [(&key2, &account)].as_slice()), /*skip*/ 0); // Do the notification let notifier = GeyserTestPlugin::default(); let notifier = Arc::new(notifier); - accounts.set_geyser_plugin_notifier(Some(notifier.clone())); - accounts.notify_account_restore_from_snapshot(); + accounts_db.set_geyser_plugin_notifier(Some(notifier.clone())); + accounts_db.generate_index(None, false); // Ensure key1 was notified twice in different slots { let notified_key1 = notifier.accounts_notified.get(&key1).unwrap(); assert_eq!(notified_key1.len(), 2); - let (slot, write_version, _account) = ¬ified_key1[0]; - assert_eq!(*slot, 1); - assert_eq!(*write_version, 1); - let (slot, write_version, _account) = ¬ified_key1[1]; - assert_eq!(*slot, 0); - assert_eq!(*write_version, 1); + + // Since index generation goes through storages in parallel, there's not a + // deterministic order for which slots will notify first. + // So, we sort the accounts_notified values to ensure we can assert correctly. 
+ let mut notified_key1_values = notified_key1.value().clone(); + notified_key1_values.sort_unstable_by_key(|k| k.0); + + let (slot, write_version, _account) = ¬ified_key1_values[0]; + assert_eq!(*slot, slot0); + assert_eq!(*write_version, 0); + let (slot, write_version, _account) = ¬ified_key1_values[1]; + assert_eq!(*slot, slot1); + assert_eq!(*write_version, 0); } // Ensure key2 was notified once @@ -238,8 +164,8 @@ pub mod tests { let notified_key2 = notifier.accounts_notified.get(&key2).unwrap(); assert_eq!(notified_key2.len(), 1); let (slot, write_version, _account) = ¬ified_key2[0]; - assert_eq!(*slot, 2); - assert_eq!(*write_version, 1); + assert_eq!(*slot, slot2); + assert_eq!(*write_version, 0); } // Ensure we were notified that startup is done @@ -263,24 +189,24 @@ pub mod tests { let account1 = AccountSharedData::new(account1_lamports1, 1, AccountSharedData::default().owner()); let slot0 = 0; - accounts.store_cached((slot0, &[(&key1, &account1)][..])); + accounts.store_for_tests((slot0, &[(&key1, &account1)][..])); let key2 = solana_pubkey::new_rand(); let account2_lamports: u64 = 200; let account2 = AccountSharedData::new(account2_lamports, 1, AccountSharedData::default().owner()); - accounts.store_cached((slot0, &[(&key2, &account2)][..])); + accounts.store_for_tests((slot0, &[(&key2, &account2)][..])); let account1_lamports2 = 2; let slot1 = 1; let account1 = AccountSharedData::new(account1_lamports2, 1, account1.owner()); - accounts.store_cached((slot1, &[(&key1, &account1)][..])); + accounts.store_for_tests((slot1, &[(&key1, &account1)][..])); let key3 = solana_pubkey::new_rand(); let account3_lamports: u64 = 300; let account3 = AccountSharedData::new(account3_lamports, 1, AccountSharedData::default().owner()); - accounts.store_cached((slot1, &[(&key3, &account3)][..])); + accounts.store_for_tests((slot1, &[(&key3, &account3)][..])); assert_eq!(notifier.accounts_notified.get(&key1).unwrap().len(), 2); assert_eq!( diff --git 
a/accounts-db/src/accounts_db/stats.rs b/accounts-db/src/accounts_db/stats.rs index 6a5f5409696e59..a59c1497a309ea 100644 --- a/accounts-db/src/accounts_db/stats.rs +++ b/accounts-db/src/accounts_db/stats.rs @@ -10,12 +10,7 @@ use { #[derive(Debug, Default)] pub struct AccountsStats { - pub delta_hash_scan_time_total_us: AtomicU64, - pub delta_hash_accumulate_time_total_us: AtomicU64, - pub delta_hash_num: AtomicU64, - pub last_store_report: AtomicInterval, - pub store_hash_accounts: AtomicU64, pub store_accounts: AtomicU64, pub store_update_index: AtomicU64, pub store_handle_reclaims: AtomicU64, @@ -23,13 +18,16 @@ pub struct AccountsStats { pub stakes_cache_check_and_store_us: AtomicU64, pub store_num_accounts: AtomicU64, pub store_total_data: AtomicU64, + pub num_reclaims: AtomicU64, pub create_store_count: AtomicU64, - pub store_get_slot_store: AtomicU64, - pub store_find_existing: AtomicU64, pub dropped_stores: AtomicU64, pub handle_dead_keys_us: AtomicU64, pub purge_exact_us: AtomicU64, pub purge_exact_count: AtomicU64, + pub num_obsolete_slots_removed: AtomicUsize, + pub num_obsolete_bytes_removed: AtomicU64, + pub add_zero_lamport_accounts_us: AtomicU64, + pub num_zero_lamport_accounts_added: AtomicU64, } #[derive(Debug, Default)] diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs index f2533e62c29926..abb79aa3b9cf2c 100644 --- a/accounts-db/src/accounts_db/tests.rs +++ b/accounts-db/src/accounts_db/tests.rs @@ -2,10 +2,8 @@ use { super::*, crate::{ - account_info::StoredSize, accounts_file::AccountsFileProvider, accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude}, - ancient_append_vecs, append_vec::{ aligned_stored_size, test_utils::TempFile, AccountMeta, AppendVec, StoredAccountMeta, StoredMeta, @@ -27,7 +25,7 @@ use { sync::{atomic::AtomicBool, RwLock}, thread::{self, Builder, JoinHandle}, }, - test_case::test_case, + test_case::{test_case, test_matrix}, }; fn linear_ancestors(end_slot: u64) -> 
Ancestors { @@ -118,28 +116,36 @@ impl AccountStorageEntry { /// For test that should panic, use the following syntax. /// define_accounts_db_test!(TEST_NAME, panic = "PANIC_MSG", |accounts_db| { TEST_BODY }); macro_rules! define_accounts_db_test { - (@testfn $name:ident, $accounts_file_provider: ident, |$accounts_db:ident| $inner: tt) => { - fn run_test($accounts_db: AccountsDb) { - $inner - } - let accounts_db = - AccountsDb::new_single_for_tests_with_provider($accounts_file_provider); - run_test(accounts_db); - + (@testfn $name:ident, $accounts_file_provider: ident, $mark_obsolete_accounts: ident, |$accounts_db:ident| $inner: tt) => { + fn run_test($accounts_db: AccountsDb) { + $inner + } + let accounts_db = AccountsDb::new_single_for_tests_with_provider_and_config( + $accounts_file_provider, + AccountsDbConfig { + mark_obsolete_accounts: $mark_obsolete_accounts, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + ); + run_test(accounts_db); }; ($name:ident, |$accounts_db:ident| $inner: tt) => { - #[test_case(AccountsFileProvider::AppendVec; "append_vec")] - #[test_case(AccountsFileProvider::HotStorage; "hot_storage")] - fn $name(accounts_file_provider: AccountsFileProvider) { - define_accounts_db_test!(@testfn $name, accounts_file_provider, |$accounts_db| $inner); + #[test_matrix( + [AccountsFileProvider::AppendVec, AccountsFileProvider::HotStorage], + [MarkObsoleteAccounts::Enabled, MarkObsoleteAccounts::Disabled] + )] + fn $name(accounts_file_provider: AccountsFileProvider, mark_obsolete_accounts: MarkObsoleteAccounts) { + define_accounts_db_test!(@testfn $name, accounts_file_provider, mark_obsolete_accounts, |$accounts_db| $inner); } }; ($name:ident, panic = $panic_message:literal, |$accounts_db:ident| $inner: tt) => { - #[test_case(AccountsFileProvider::AppendVec; "append_vec")] - #[test_case(AccountsFileProvider::HotStorage; "hot_storage")] + #[test_matrix( + [AccountsFileProvider::AppendVec, AccountsFileProvider::HotStorage], + [MarkObsoleteAccounts::Enabled, 
MarkObsoleteAccounts::Disabled] + )] #[should_panic(expected = $panic_message)] - fn $name(accounts_file_provider: AccountsFileProvider) { - define_accounts_db_test!(@testfn $name, accounts_file_provider, |$accounts_db| $inner); + fn $name(accounts_file_provider: AccountsFileProvider, mark_obsolete_accounts: MarkObsoleteAccounts) { + define_accounts_db_test!(@testfn $name, accounts_file_provider, mark_obsolete_accounts, |$accounts_db| $inner); } }; } @@ -175,7 +181,16 @@ fn run_generate_index_duplicates_within_slot_test(db: AccountsDb, reverse: bool) append_vec.accounts.write_accounts(&storable_accounts, 0); assert!(!db.accounts_index.contains(&pubkey)); - db.generate_index(None, false); + let storage_info = StorageSizeAndCountMap::default(); + let storage = db.get_storage_for_slot(slot0).unwrap(); + let mut reader = append_vec::new_scan_accounts_reader(); + db.generate_index_for_slot( + &mut reader, + &storage, + storage.slot(), + storage.id(), + &storage_info, + ); } define_accounts_db_test!( @@ -358,7 +373,7 @@ pub(crate) fn append_single_account_with_default_hash( account, &AccountSecondaryIndexes::default(), account_info, - &mut Vec::default(), + &mut ReclaimsSlotList::new(), UpsertReclaim::IgnoreReclaims, ); } @@ -386,6 +401,7 @@ fn sample_storage_with_entries_id_fill_percentage( mark_alive: bool, account_data_size: Option, fill_percentage: u64, + storage_access: StorageAccess, ) -> Arc { let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap(); let file_size = account_data_size.unwrap_or(123) * 100 / fill_percentage; @@ -396,11 +412,13 @@ fn sample_storage_with_entries_id_fill_percentage( id, size_aligned as u64, AccountsFileProvider::AppendVec, + storage_access, ); let av = AccountsFile::AppendVec(AppendVec::new( &tf.path, true, (1024 * 1024).max(size_aligned), + storage_access, )); data.accounts = av; @@ -416,6 +434,7 @@ fn sample_storage_with_entries_id( id: AccountsFileId, mark_alive: bool, account_data_size: Option, + storage_access: StorageAccess, 
) -> Arc { sample_storage_with_entries_id_fill_percentage( tf, @@ -425,6 +444,7 @@ fn sample_storage_with_entries_id( mark_alive, account_data_size, 100, + storage_access, ) } @@ -432,7 +452,7 @@ define_accounts_db_test!(test_accountsdb_add_root, |db| { let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); - db.store_for_tests(0, &[(&key, &account0)]); + db.store_for_tests((0, [(&key, &account0)].as_slice())); db.add_root(0); let ancestors = vec![(1, 1)].into_iter().collect(); assert_eq!( @@ -445,10 +465,10 @@ define_accounts_db_test!(test_accountsdb_latest_ancestor, |db| { let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); - db.store_for_tests(0, &[(&key, &account0)]); + db.store_for_tests((0, [(&key, &account0)].as_slice())); let account1 = AccountSharedData::new(0, 0, &key); - db.store_for_tests(1, &[(&key, &account1)]); + db.store_for_tests((1, [(&key, &account1)].as_slice())); let ancestors = vec![(1, 1)].into_iter().collect(); assert_eq!( @@ -463,14 +483,17 @@ define_accounts_db_test!(test_accountsdb_latest_ancestor, |db| { ); let mut accounts = Vec::new(); - db.unchecked_scan_accounts( - "", + db.scan_accounts( &ancestors, - |_, account, _| { - accounts.push(account.take_account()); + 0, + |scan_result| { + if let Some((_, account, _)) = scan_result { + accounts.push(account); + } }, &ScanConfig::default(), - ); + ) + .expect("should scan accounts"); assert_eq!(accounts, vec![account1]); }); @@ -478,10 +501,10 @@ define_accounts_db_test!(test_accountsdb_latest_ancestor_with_root, |db| { let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); - db.store_for_tests(0, &[(&key, &account0)]); + db.store_for_tests((0, [(&key, &account0)].as_slice())); let account1 = AccountSharedData::new(0, 0, &key); - db.store_for_tests(1, &[(&key, &account1)]); + db.store_for_tests((1, [(&key, &account1)].as_slice())); db.add_root(0); let ancestors = vec![(1, 1)].into_iter().collect(); @@ -502,7 
+525,7 @@ define_accounts_db_test!(test_accountsdb_root_one_slot, |db| { let account0 = AccountSharedData::new(1, 0, &key); // store value 1 in the "root", i.e. db zero - db.store_for_tests(0, &[(&key, &account0)]); + db.store_for_tests((0, [(&key, &account0)].as_slice())); // now we have: // @@ -515,7 +538,7 @@ define_accounts_db_test!(test_accountsdb_root_one_slot, |db| { // store value 0 in one child let account1 = AccountSharedData::new(0, 0, &key); - db.store_for_tests(1, &[(&key, &account1)]); + db.store_for_tests((1, [(&key, &account1)].as_slice())); // masking accounts is done at the Accounts level, at accountsDB we see // original account (but could also accept "None", which is implemented @@ -593,14 +616,21 @@ define_accounts_db_test!(test_accountsdb_count_stores, |db| { let pubkey = solana_pubkey::new_rand(); let account = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey); - db.store_for_tests(1, &[(&pubkey, &account)]); - db.store_for_tests(1, &[(&pubkeys[0], &account)]); + db.store_for_tests((1, [(&pubkey, &account)].as_slice())); + db.store_for_tests((1, [(&pubkeys[0], &account)].as_slice())); // adding root doesn't change anything db.add_root_and_flush_write_cache(1); { let slot_0_store = &db.storage.get_slot_storage_entry(0).unwrap(); let slot_1_store = &db.storage.get_slot_storage_entry(1).unwrap(); - assert_eq!(slot_0_store.count(), 2); + + // With obsolete accounts enabled, flush_write_cache will clean pubkeys in slot0 + // when flushing slot1 + if db.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled { + assert_eq!(slot_0_store.count(), 1); + } else { + assert_eq!(slot_0_store.count(), 2); + } assert_eq!(slot_1_store.count(), 2); assert_eq!(slot_0_store.accounts_count(), 2); assert_eq!(slot_1_store.accounts_count(), 2); @@ -609,7 +639,7 @@ define_accounts_db_test!(test_accountsdb_count_stores, |db| { // overwrite old rooted account version; only the r_slot_0_stores.count() should be // decremented // slot 2 is not a root 
and should be ignored by clean - db.store_for_tests(2, &[(&pubkeys[0], &account)]); + db.store_for_tests((2, [(&pubkeys[0], &account)].as_slice())); db.clean_accounts_for_tests(); { let slot_0_store = &db.storage.get_slot_storage_entry(0).unwrap(); @@ -626,11 +656,11 @@ define_accounts_db_test!(test_accounts_unsquashed, |db0| { // 1 token in the "root", i.e. db zero let account0 = AccountSharedData::new(1, 0, &key); - db0.store_for_tests(0, &[(&key, &account0)]); + db0.store_for_tests((0, [(&key, &account0)].as_slice())); // 0 lamports in the child let account1 = AccountSharedData::new(0, 0, &key); - db0.store_for_tests(1, &[(&key, &account1)]); + db0.store_for_tests((1, [(&key, &account1)].as_slice())); // masking accounts is done at the Accounts level, at accountsDB we see // original account @@ -646,6 +676,81 @@ define_accounts_db_test!(test_accounts_unsquashed, |db0| { ); }); +/// Test to verify that reclaiming old storages during flush works correctly. +/// Creates multiple storages with accounts, flushes them, and then creates a new storage +/// that invalidates some of the old accounts. The test checks that one of the old storages +/// is reclaimed as the storage is fully invalidated +#[test] +fn test_flush_slots_with_reclaim_old_slots() { + let accounts = AccountsDb::new_single_for_tests(); + let mut pubkeys = vec![]; + + // Create and flush 5 slots with 5 accounts each + for slot in 0..5 { + let mut slot_pubkeys = vec![]; + for _ in 0..5 { + let pubkey = solana_pubkey::new_rand(); + let account = AccountSharedData::new(slot + 1, 0, &pubkey); + accounts.store_for_tests((slot, [(&pubkey, &account)].as_slice())); + slot_pubkeys.push(pubkey); + } + pubkeys.push(slot_pubkeys); + accounts.add_root_and_flush_write_cache(slot); + } + + // Create another slot which invalidates 5 accounts from the first slot, + // 4 accounts from the second slot, etc. 
+ let new_slot = 5; + for (slot, slot_pubkeys) in pubkeys.iter().enumerate() { + for pubkey in slot_pubkeys.iter().take(5 - slot) { + let account = AccountSharedData::new(new_slot + 1, 0, pubkey); + accounts.store_for_tests((new_slot, [(pubkey, &account)].as_slice())); + } + } + + // Get the accounts from the write cache slot + let accounts_list: Vec<(_, _)> = accounts + .accounts_cache + .slot_cache(new_slot) + .unwrap() + .iter() + .map(|iter_item| { + let pubkey = *iter_item.key(); + let account = iter_item.value().account.clone(); + (pubkey, account) + }) + .collect(); + + let storage = accounts.create_and_insert_store(new_slot, 4096, "test_flush_slots"); + + accounts.accounts_index.add_root(new_slot); + + // Flushing this storage directly using _store_accounts_frozen. This is done to pass in UpsertReclaim::ReclaimOldSlots + accounts._store_accounts_frozen( + (new_slot, &accounts_list[..]), + &storage, + UpsertReclaim::ReclaimOldSlots, + UpdateIndexThreadSelection::Inline, + ); + + // Remove the flushed slot from the cache + assert!(accounts.accounts_cache.remove_slot(new_slot).is_some()); + + // Verify that the storage for the first slot has been removed + assert!(accounts.storage.get_slot_storage_entry(0).is_none()); + for slot in 1..5 { + assert!(accounts.storage.get_slot_storage_entry(slot).is_some()); + + // Verify that the obsolete accounts for the remaining slots are correct + let storage = accounts.storage.get_slot_storage_entry(slot).unwrap(); + assert_eq!( + storage.get_obsolete_accounts(Some(new_slot)).len() as u64, + 5 - slot + ); + } + assert!(accounts.storage.get_slot_storage_entry(new_slot).is_some()); +} + fn run_test_remove_unrooted_slot(is_cached: bool, db: AccountsDb) { let unrooted_slot = 9; let unrooted_bank_id = 9; @@ -654,10 +759,19 @@ fn run_test_remove_unrooted_slot(is_cached: bool, db: AccountsDb) { let ancestors = vec![(unrooted_slot, 1)].into_iter().collect(); assert!(!db.accounts_index.contains(&key)); if is_cached { - 
db.store_cached((unrooted_slot, &[(&key, &account0)][..])); + db.store_for_tests((unrooted_slot, &[(&key, &account0)][..])); + assert!(db.accounts_cache.contains(unrooted_slot)); } else { - db.store_for_tests(unrooted_slot, &[(&key, &account0)]); + let file_size = 4096; // value doesn't need to be exact, just big enough to hold account0 + let storage = db.create_and_insert_store(unrooted_slot, file_size, ""); + db.store_accounts_frozen( + (unrooted_slot, [(&key, &account0)].as_slice()), + &storage, + UpdateIndexThreadSelection::Inline, + ); + assert!(db.storage.get_slot_storage_entry(unrooted_slot).is_some()); } + assert!(!db.accounts_index.is_alive_root(unrooted_slot)); assert!(db.accounts_index.contains(&key)); db.assert_load_account(unrooted_slot, key, 1); @@ -670,7 +784,7 @@ fn run_test_remove_unrooted_slot(is_cached: bool, db: AccountsDb) { // Test we can store for the same slot again and get the right information let account0 = AccountSharedData::new(2, 0, &key); - db.store_for_tests(unrooted_slot, &[(&key, &account0)]); + db.store_for_tests((unrooted_slot, [(&key, &account0)].as_slice())); db.assert_load_account(unrooted_slot, key, 2); } @@ -689,7 +803,7 @@ fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range: if let Some((mut account, _)) = accounts.load_without_fixed_root(&ancestors, &pubkeys[idx]) { account.checked_add_lamports(1).unwrap(); - accounts.store_for_tests(slot, &[(&pubkeys[idx], &account)]); + accounts.store_for_tests((slot, [(&pubkeys[idx], &account)].as_slice())); if account.is_zero_lamport() { let ancestors = vec![(slot, 0)].into_iter().collect(); assert!(accounts @@ -752,7 +866,7 @@ fn test_account_grow_many() { for i in 0..9 { let key = solana_pubkey::new_rand(); let account = AccountSharedData::new(i + 1, size as usize / 4, &key); - accounts.store_for_tests(0, &[(&key, &account)]); + accounts.store_for_tests((0, [(&key, &account)].as_slice())); keys.push(key); } let ancestors = vec![(0, 
0)].into_iter().collect(); @@ -785,28 +899,25 @@ fn test_account_grow() { for pass in 0..27 { let accounts = AccountsDb::new_single_for_tests(); - let status = [AccountStorageStatus::Available, AccountStorageStatus::Full]; let pubkey1 = solana_pubkey::new_rand(); let account1 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1); - accounts.store_for_tests(0, &[(&pubkey1, &account1)]); + accounts.store_for_tests((0, [(&pubkey1, &account1)].as_slice())); if pass == 0 { accounts.add_root_and_flush_write_cache(0); let store = &accounts.storage.get_slot_storage_entry(0).unwrap(); assert_eq!(store.count(), 1); - assert_eq!(store.status(), AccountStorageStatus::Available); continue; } let pubkey2 = solana_pubkey::new_rand(); let account2 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2); - accounts.store_for_tests(0, &[(&pubkey2, &account2)]); + accounts.store_for_tests((0, [(&pubkey2, &account2)].as_slice())); if pass == 1 { accounts.add_root_and_flush_write_cache(0); assert_eq!(accounts.storage.len(), 1); let store = &accounts.storage.get_slot_storage_entry(0).unwrap(); assert_eq!(store.count(), 2); - assert_eq!(store.status(), AccountStorageStatus::Available); continue; } let ancestors = vec![(0, 0)].into_iter().collect(); @@ -827,13 +938,11 @@ fn test_account_grow() { // lots of writes, but they are all duplicates for i in 0..25 { - accounts.store_for_tests(0, &[(&pubkey1, &account1)]); + accounts.store_for_tests((0, [(&pubkey1, &account1)].as_slice())); let flush = pass == i + 2; if flush { accounts.add_root_and_flush_write_cache(0); assert_eq!(accounts.storage.len(), 1); - let store = &accounts.storage.get_slot_storage_entry(0).unwrap(); - assert_eq!(store.status(), status[0]); } let ancestors = vec![(0, 0)].into_iter().collect(); assert_eq!( @@ -860,14 +969,23 @@ fn test_account_grow() { #[test] fn test_lazy_gc_slot() { solana_logger::setup(); - //This test is pedantic - //A slot is purged when a non root bank is cleaned up. 
If a slot is behind root but it is - //not root, it means we are retaining dead banks. - let accounts = AccountsDb::new_single_for_tests(); + + // Only run this test with mark obsolete accounts disabled as garbage collection + // is not lazy with mark obsolete accounts enabled + let accounts = AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + mark_obsolete_accounts: MarkObsoleteAccounts::Disabled, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ); + let pubkey = solana_pubkey::new_rand(); let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); //store an account - accounts.store_for_tests(0, &[(&pubkey, &account)]); + accounts.store_for_tests((0, [(&pubkey, &account)].as_slice())); accounts.add_root_and_flush_write_cache(0); let ancestors = vec![(0, 0)].into_iter().collect(); @@ -886,7 +1004,7 @@ fn test_lazy_gc_slot() { assert_eq!(accounts.storage.get_slot_storage_entry(0).unwrap().id(), id); //store causes clean - accounts.store_for_tests(1, &[(&pubkey, &account)]); + accounts.store_for_tests((1, [(&pubkey, &account)].as_slice())); //slot is gone accounts.print_accounts_stats("pre-clean"); @@ -914,8 +1032,8 @@ fn test_clean_zero_lamport_and_dead_slot() { let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); // Store two accounts - accounts.store_for_tests(0, &[(&pubkey1, &account)]); - accounts.store_for_tests(0, &[(&pubkey2, &account)]); + accounts.store_for_tests((0, [(&pubkey1, &account)].as_slice())); + accounts.store_for_tests((0, [(&pubkey2, &account)].as_slice())); // Make sure both accounts are in the same AppendVec in slot 0, which // will prevent pubkey1 from being cleaned up later even when it's a @@ -950,10 +1068,10 @@ fn test_clean_zero_lamport_and_dead_slot() { ); // Update account 1 in slot 1 - accounts.store_for_tests(1, &[(&pubkey1, &account)]); + accounts.store_for_tests((1, [(&pubkey1, &account)].as_slice())); // Update account 1 as zero 
lamports account - accounts.store_for_tests(2, &[(&pubkey1, &zero_lamport_account)]); + accounts.store_for_tests((2, [(&pubkey1, &zero_lamport_account)].as_slice())); // Pubkey 1 was the only account in slot 1, and it was updated in slot 2, so // slot 1 should be purged @@ -983,49 +1101,58 @@ fn test_clean_dead_slot_with_obsolete_accounts() { // Obsolete accounts are already unreffed so they should not be unreffed again - let accounts = AccountsDb::new_single_for_tests(); + let accounts = AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + mark_obsolete_accounts: MarkObsoleteAccounts::Enabled, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ); + let pubkey = solana_pubkey::new_rand(); + let pubkey2 = solana_pubkey::new_rand(); let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner()); + let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); + accounts.set_latest_full_snapshot_slot(2); - // Store account 1 in slot 0 - accounts.store_for_tests(0, &[(&pubkey, &account)]); + // Store pubkey1 and pubkey2 in slot 0 + accounts.store_for_tests((0, [(&pubkey, &account), (&pubkey2, &account)].as_slice())); - // Update account 1 as in slot 1 - accounts.store_for_tests(1, &[(&pubkey, &account)]); + // Update pubkey1 and make pubkey2 a zero lamport account in slot 1 + accounts.store_for_tests(( + 1, + [(&pubkey, &account), (&pubkey2, &zero_lamport_account)].as_slice(), + )); - // Update account 1 as in slot 2 - accounts.store_for_tests(2, &[(&pubkey, &account)]); + // Update pubkey1 as in slot 2 + accounts.store_for_tests((2, [(&pubkey, &account)].as_slice())); // Flush the slots individually to avoid reclaims accounts.add_root_and_flush_write_cache(0); accounts.add_root_and_flush_write_cache(1); accounts.add_root_and_flush_write_cache(2); - // Pubkey1 should be in 3 slots, 0 and 1 and 2 - assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey), 3); + // Slot 1 should not 
be removed as it has the zero lamport account + assert!(accounts.storage.get_slot_storage_entry(1).is_some()); + let slot = accounts.storage.get_slot_storage_entry(1).unwrap(); - // Mark pubkey in slot 1 as obsolete, simulating obsolete accounts being enabled - let old_storage = accounts - .storage - .get_slot_storage_entry_shrinking_in_progress_ok(1) - .unwrap(); - old_storage.mark_accounts_obsolete(vec![(0, 1)].into_iter(), 2); + // Ensure that slot1 also still contains the obsolete account + assert_eq!(slot.get_obsolete_accounts(None).len(), 1); - // Unreference pubkey, which would occur during the normal mark_accounts_obsolete flow - accounts.unref_pubkeys([pubkey].iter(), 1, &HashSet::new()); + // Ref count for pubkey1 should be 1 as obsolete accounts are enabled + accounts.assert_ref_count(&pubkey, 1); - // Pubkey1 should now have two references: Slot0 and Slot2. - assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey), 2); - - // Clean, remove slot0/1. + // Clean, which will remove slot1 accounts.clean_accounts_for_tests(); + assert!(accounts.storage.get_slot_storage_entry(0).is_none()); assert!(accounts.storage.get_slot_storage_entry(1).is_none()); - // Ref count for pubkey should be 1. It was decremented for slot1 and above, and decremented - // for slot0 during clean_accounts_for_tests - // It was NOT decremented for slot1 during clean_accounts_for_test as it was marked obsolete - assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey), 1); + // Ref count for pubkey should be 1. 
It was NOT decremented during clean_accounts_for_tests + // despite slot 1 being removed, because the account was already obsolete + accounts.assert_ref_count(&pubkey, 1); } #[test] @@ -1038,11 +1165,17 @@ fn test_remove_zero_lamport_multi_ref_accounts_panic() { let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); let slot = 1; - accounts.store_for_tests(slot, &[(&pubkey_zero, &one_lamport_account)]); - accounts.add_root_and_flush_write_cache(slot); + accounts.store_for_tests((slot, [(&pubkey_zero, &one_lamport_account)].as_slice())); + + // Flush without cleaning to avoid reclaiming pubkey_zero early + accounts.add_root(1); + accounts.flush_rooted_accounts_cache(Some(slot), false); + + accounts.store_for_tests((slot + 1, [(&pubkey_zero, &zero_lamport_account)].as_slice())); - accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]); - accounts.add_root_and_flush_write_cache(slot + 1); + // Flush without cleaning to avoid reclaiming pubkey_zero early + accounts.add_root(2); + accounts.flush_rooted_accounts_cache(Some(slot + 1), false); // This should panic because there are 2 refs for pubkey_zero. 
accounts.remove_zero_lamport_single_ref_accounts_after_shrink( @@ -1064,20 +1197,25 @@ fn test_remove_zero_lamport_single_ref_accounts_after_shrink() { AccountSharedData::new(0, 0, AccountSharedData::default().owner()); let slot = 1; - accounts.store_for_tests( + accounts.store_for_tests(( slot, - &[(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)], - ); + [(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)].as_slice(), + )); // Simulate rooting the zero-lamport account, writes it to storage accounts.add_root_and_flush_write_cache(slot); if pass > 0 { // store in write cache - accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]); + accounts + .store_for_tests((slot + 1, [(&pubkey_zero, &zero_lamport_account)].as_slice())); if pass == 2 { - // move to a storage (causing ref count to increase) - accounts.add_root_and_flush_write_cache(slot + 1); + // This test pass is still relevant with obsolete accounts enabled, but can be + // removed if all scenarios where flush_write_cache doesn't clean are eliminated. 
+ + // add root and flush without clean (causing ref count to increase) + accounts.add_root(slot + 1); + accounts.flush_rooted_accounts_cache(None, false); } } @@ -1182,10 +1320,10 @@ fn test_shrink_zero_lamport_single_ref_account() { AccountSharedData::new(0, 0, AccountSharedData::default().owner()); let slot = 1; // Store a zero-lamport account and a non-zero lamport account - accounts.store_for_tests( + accounts.store_for_tests(( slot, - &[(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)], - ); + [(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)].as_slice(), + )); // Simulate rooting the zero-lamport account, should be a // candidate for cleaning @@ -1260,11 +1398,15 @@ fn test_clean_multiple_zero_lamport_decrements_index_ref_count() { let pubkey2 = solana_pubkey::new_rand(); let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); + // If there is no latest full snapshot, zero lamport accounts can be cleaned and removed + // immediately. 
Set latest full snapshot slot to zero to avoid cleaning zero lamport accounts + accounts.set_latest_full_snapshot_slot(0); + // Store 2 accounts in slot 0, then update account 1 in two more slots - accounts.store_for_tests(0, &[(&pubkey1, &zero_lamport_account)]); - accounts.store_for_tests(0, &[(&pubkey2, &zero_lamport_account)]); - accounts.store_for_tests(1, &[(&pubkey1, &zero_lamport_account)]); - accounts.store_for_tests(2, &[(&pubkey1, &zero_lamport_account)]); + accounts.store_for_tests((0, [(&pubkey1, &zero_lamport_account)].as_slice())); + accounts.store_for_tests((0, [(&pubkey2, &zero_lamport_account)].as_slice())); + accounts.store_for_tests((1, [(&pubkey1, &zero_lamport_account)].as_slice())); + accounts.store_for_tests((2, [(&pubkey1, &zero_lamport_account)].as_slice())); // Root all slots accounts.add_root_and_flush_write_cache(0); accounts.add_root_and_flush_write_cache(1); @@ -1272,8 +1414,8 @@ fn test_clean_multiple_zero_lamport_decrements_index_ref_count() { // Account ref counts should match how many slots they were stored in // Account 1 = 3 slots; account 2 = 1 slot - assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 3); - assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 1); + accounts.assert_ref_count(&pubkey1, 3); + accounts.assert_ref_count(&pubkey2, 1); accounts.clean_accounts_for_tests(); // Slots 0 and 1 should each have been cleaned because all of their @@ -1286,13 +1428,15 @@ fn test_clean_multiple_zero_lamport_decrements_index_ref_count() { // Index ref counts should be consistent with the slot stores. 
Account 1 ref count // should be 1 since slot 2 is the only alive slot; account 2 should have a ref // count of 0 due to slot 0 being dead - assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 1); - assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 0); + accounts.assert_ref_count(&pubkey1, 1); + accounts.assert_ref_count(&pubkey2, 0); + // Allow clean to clean any zero lamports up to and including slot 2 + accounts.set_latest_full_snapshot_slot(2); accounts.clean_accounts_for_tests(); // Slot 2 will now be cleaned, which will leave account 1 with a ref count of 0 assert!(accounts.storage.get_slot_storage_entry(2).is_none()); - assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0); + accounts.assert_ref_count(&pubkey1, 0); } #[test] @@ -1305,8 +1449,8 @@ fn test_clean_zero_lamport_and_old_roots() { let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); // Store a zero-lamport account - accounts.store_for_tests(0, &[(&pubkey, &account)]); - accounts.store_for_tests(1, &[(&pubkey, &zero_lamport_account)]); + accounts.store_for_tests((0, [(&pubkey, &account)].as_slice())); + accounts.store_for_tests((1, [(&pubkey, &zero_lamport_account)].as_slice())); // Simulate rooting the zero-lamport account, should be a // candidate for cleaning @@ -1334,71 +1478,106 @@ fn test_clean_zero_lamport_and_old_roots() { assert!(!accounts.accounts_index.contains_with(&pubkey, None, None)); } -#[test] -fn test_clean_old_with_normal_account() { +#[test_case(MarkObsoleteAccounts::Enabled)] +#[test_case(MarkObsoleteAccounts::Disabled)] +fn test_clean_old_with_normal_account(mark_obsolete_accounts: MarkObsoleteAccounts) { solana_logger::setup(); - let accounts = AccountsDb::new_single_for_tests(); + let accounts = AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + mark_obsolete_accounts, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ); + let pubkey = 
solana_pubkey::new_rand(); let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); //store an account - accounts.store_for_tests(0, &[(&pubkey, &account)]); - accounts.store_for_tests(1, &[(&pubkey, &account)]); + accounts.store_for_tests((0, [(&pubkey, &account)].as_slice())); + accounts.store_for_tests((1, [(&pubkey, &account)].as_slice())); // simulate slots are rooted after while accounts.add_root_and_flush_write_cache(0); accounts.add_root_and_flush_write_cache(1); - //even if rooted, old state isn't cleaned up - assert_eq!(accounts.alive_account_count_in_slot(0), 1); assert_eq!(accounts.alive_account_count_in_slot(1), 1); - accounts.clean_accounts_for_tests(); + // With obsolete accounts enabled, slot 0 is cleaned during flush + if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled { + assert_eq!(accounts.alive_account_count_in_slot(0), 1); + accounts.clean_accounts_for_tests(); + } //now old state is cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); assert_eq!(accounts.alive_account_count_in_slot(1), 1); } -#[test] -fn test_clean_old_with_zero_lamport_account() { +#[test_case(MarkObsoleteAccounts::Enabled)] +#[test_case(MarkObsoleteAccounts::Disabled)] +fn test_clean_old_with_zero_lamport_account(mark_obsolete_accounts: MarkObsoleteAccounts) { solana_logger::setup(); - let accounts = AccountsDb::new_single_for_tests(); + let accounts = AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + mark_obsolete_accounts, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ); let pubkey1 = solana_pubkey::new_rand(); let pubkey2 = solana_pubkey::new_rand(); let normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); //store an account - accounts.store_for_tests(0, &[(&pubkey1, &normal_account)]); - accounts.store_for_tests(1, &[(&pubkey1, &zero_account)]); - 
accounts.store_for_tests(0, &[(&pubkey2, &normal_account)]); - accounts.store_for_tests(1, &[(&pubkey2, &normal_account)]); + accounts.store_for_tests((0, [(&pubkey1, &normal_account)].as_slice())); + accounts.store_for_tests((1, [(&pubkey1, &zero_account)].as_slice())); + accounts.store_for_tests((0, [(&pubkey2, &normal_account)].as_slice())); + accounts.store_for_tests((1, [(&pubkey2, &normal_account)].as_slice())); //simulate slots are rooted after while accounts.add_root_and_flush_write_cache(0); accounts.add_root_and_flush_write_cache(1); - //even if rooted, old state isn't cleaned up - assert_eq!(accounts.alive_account_count_in_slot(0), 2); assert_eq!(accounts.alive_account_count_in_slot(1), 2); accounts.print_accounts_stats(""); - accounts.clean_accounts_for_tests(); + // With obsolete accounts enabled, slot 0 is cleaned during flush + if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled { + // even if rooted, old state isn't cleaned up + assert_eq!(accounts.alive_account_count_in_slot(0), 2); + accounts.clean_accounts_for_tests(); + } //Old state behind zero-lamport account is cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); assert_eq!(accounts.alive_account_count_in_slot(1), 2); } -#[test] -fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { +#[test_case(MarkObsoleteAccounts::Enabled)] +#[test_case(MarkObsoleteAccounts::Disabled)] +fn test_clean_old_with_both_normal_and_zero_lamport_accounts( + mark_obsolete_accounts: MarkObsoleteAccounts, +) { solana_logger::setup(); let mut accounts = AccountsDb { account_indexes: spl_token_mint_index_enabled(), - ..AccountsDb::new_single_for_tests() + ..AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + mark_obsolete_accounts, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ) }; let pubkey1 = solana_pubkey::new_rand(); let pubkey2 = solana_pubkey::new_rand(); @@ -1418,19 +1597,24 @@ fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { 
zero_account.set_data(account_data_with_mint); //store an account - accounts.store_for_tests(0, &[(&pubkey1, &normal_account)]); - accounts.store_for_tests(0, &[(&pubkey1, &normal_account)]); - accounts.store_for_tests(1, &[(&pubkey1, &zero_account)]); - accounts.store_for_tests(0, &[(&pubkey2, &normal_account)]); - accounts.store_for_tests(2, &[(&pubkey2, &normal_account)]); + accounts.store_for_tests((0, [(&pubkey1, &normal_account)].as_slice())); + accounts.store_for_tests((0, [(&pubkey1, &normal_account)].as_slice())); + accounts.store_for_tests((1, [(&pubkey1, &zero_account)].as_slice())); + accounts.store_for_tests((0, [(&pubkey2, &normal_account)].as_slice())); + accounts.store_for_tests((2, [(&pubkey2, &normal_account)].as_slice())); //simulate slots are rooted after while accounts.add_root_and_flush_write_cache(0); accounts.add_root_and_flush_write_cache(1); accounts.add_root_and_flush_write_cache(2); - //even if rooted, old state isn't cleaned up - assert_eq!(accounts.alive_account_count_in_slot(0), 2); + if mark_obsolete_accounts == MarkObsoleteAccounts::Enabled { + // With obsolete accounts enabled, slot 0 is cleaned during flush + assert_eq!(accounts.alive_account_count_in_slot(0), 0); + } else { + //even if rooted, old state isn't cleaned up + assert_eq!(accounts.alive_account_count_in_slot(0), 2); + } assert_eq!(accounts.alive_account_count_in_slot(1), 1); assert_eq!(accounts.alive_account_count_in_slot(2), 1); @@ -1529,30 +1713,43 @@ fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { assert_eq!(found_accounts, vec![pubkey2]); } -#[test] -fn test_clean_max_slot_zero_lamport_account() { +#[test_case(MarkObsoleteAccounts::Enabled)] +#[test_case(MarkObsoleteAccounts::Disabled)] +fn test_clean_max_slot_zero_lamport_account(mark_obsolete_accounts: MarkObsoleteAccounts) { solana_logger::setup(); - let accounts = AccountsDb::new_single_for_tests(); + let accounts = AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + 
mark_obsolete_accounts, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ); let pubkey = solana_pubkey::new_rand(); let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); // store an account, make it a zero lamport account // in slot 1 - accounts.store_for_tests(0, &[(&pubkey, &account)]); - accounts.store_for_tests(1, &[(&pubkey, &zero_account)]); + accounts.store_for_tests((0, [(&pubkey, &account)].as_slice())); + accounts.store_for_tests((1, [(&pubkey, &zero_account)].as_slice())); // simulate slots are rooted after while accounts.add_root_and_flush_write_cache(0); accounts.add_root_and_flush_write_cache(1); - // Only clean up to account 0, should not purge slot 0 based on - // updates in later slots in slot 1 - assert_eq!(accounts.alive_account_count_in_slot(0), 1); - assert_eq!(accounts.alive_account_count_in_slot(1), 1); - accounts.clean_accounts(Some(0), false, &EpochSchedule::default()); - assert_eq!(accounts.alive_account_count_in_slot(0), 1); + // Clean is performed as part of flush with obsolete accounts marked, so explicit clean isn't needed + if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled { + // Only clean up to account 0, should not purge slot 0 based on + // updates in later slots in slot 1 + assert_eq!(accounts.alive_account_count_in_slot(0), 1); + assert_eq!(accounts.alive_account_count_in_slot(1), 1); + accounts.clean_accounts(Some(0), false, &EpochSchedule::default()); + assert_eq!(accounts.alive_account_count_in_slot(0), 1); + } + assert_eq!(accounts.alive_account_count_in_slot(1), 1); assert!(accounts.accounts_index.contains_with(&pubkey, None, None)); @@ -1590,12 +1787,16 @@ fn test_accounts_db_purge_keep_live() { let accounts = AccountsDb::new_single_for_tests(); accounts.add_root_and_flush_write_cache(0); + // If there is no latest full snapshot, zero lamport accounts can be cleaned and removed + 
// immediately. Set latest full snapshot slot to zero to avoid cleaning zero lamport accounts + accounts.set_latest_full_snapshot_slot(0); + // Step A let mut current_slot = 1; - accounts.store_for_tests(current_slot, &[(&pubkey, &account)]); + accounts.store_for_tests((current_slot, [(&pubkey, &account)].as_slice())); // Store another live account to slot 1 which will prevent any purge // since the store count will not be zero - accounts.store_for_tests(current_slot, &[(&pubkey2, &account2)]); + accounts.store_for_tests((current_slot, [(&pubkey2, &account2)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); let (slot1, account_info1) = accounts .accounts_index @@ -1616,7 +1817,7 @@ fn test_accounts_db_purge_keep_live() { // Step B current_slot += 1; let zero_lamport_slot = current_slot; - accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]); + accounts.store_for_tests((current_slot, [(&pubkey, &zero_lamport_account)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); accounts.assert_load_account(current_slot, pubkey, zero_lamport); @@ -1640,7 +1841,7 @@ fn test_accounts_db_purge_keep_live() { // Zero lamport entry was not the one purged assert_eq!(index_slot, zero_lamport_slot); // The ref count should still be 2 because no slots were purged - assert_eq!(accounts.ref_count_for_pubkey(&pubkey), 2); + accounts.assert_ref_count(&pubkey, 2); // storage for slot 1 had 2 accounts, now has 1 after pubkey 1 // was reclaimed @@ -1666,11 +1867,11 @@ fn test_accounts_db_purge1() { accounts.add_root(0); let mut current_slot = 1; - accounts.store_for_tests(current_slot, &[(&pubkey, &account)]); + accounts.store_for_tests((current_slot, [(&pubkey, &account)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; - accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]); + accounts.store_for_tests((current_slot, [(&pubkey, &zero_lamport_account)].as_slice())); 
accounts.add_root_and_flush_write_cache(current_slot); accounts.assert_load_account(current_slot, pubkey, zero_lamport); @@ -1728,7 +1929,7 @@ fn test_store_account_stress() { loop { let account_bal = thread_rng().gen_range(1..99); account.set_lamports(account_bal); - db.store_for_tests(slot, &[(&pubkey, &account)]); + db.store_for_tests((slot, [(&pubkey, &account)].as_slice())); let (account, slot) = db .load_without_fixed_root(&Ancestors::default(), &pubkey) @@ -1757,34 +1958,40 @@ fn test_accountsdb_scan_accounts() { let key0 = solana_pubkey::new_rand(); let account0 = AccountSharedData::new(1, 0, &key); - db.store_for_tests(0, &[(&key0, &account0)]); + db.store_for_tests((0, [(&key0, &account0)].as_slice())); let key1 = solana_pubkey::new_rand(); let account1 = AccountSharedData::new(2, 0, &key); - db.store_for_tests(1, &[(&key1, &account1)]); + db.store_for_tests((1, [(&key1, &account1)].as_slice())); let ancestors = vec![(0, 0)].into_iter().collect(); let mut accounts = Vec::new(); - db.unchecked_scan_accounts( - "", + db.scan_accounts( &ancestors, - |_, account, _| { - accounts.push(account.take_account()); + 0, + |scan_result| { + if let Some((_, account, _)) = scan_result { + accounts.push(account); + } }, &ScanConfig::default(), - ); + ) + .expect("should scan accounts"); assert_eq!(accounts, vec![account0]); let ancestors = vec![(1, 1), (0, 0)].into_iter().collect(); let mut accounts = Vec::new(); - db.unchecked_scan_accounts( - "", + db.scan_accounts( &ancestors, - |_, account, _| { - accounts.push(account.take_account()); + 0, + |scan_result| { + if let Some((_, account, _)) = scan_result { + accounts.push(account); + } }, &ScanConfig::default(), - ); + ) + .expect("should scan accounts"); assert_eq!(accounts.len(), 2); } @@ -1797,20 +2004,20 @@ fn test_cleanup_key_not_removed() { let key0 = solana_pubkey::new_rand(); let account0 = AccountSharedData::new(1, 0, &key); - db.store_for_tests(0, &[(&key0, &account0)]); + db.store_for_tests((0, [(&key0, 
&account0)].as_slice())); let key1 = solana_pubkey::new_rand(); let account1 = AccountSharedData::new(2, 0, &key); - db.store_for_tests(1, &[(&key1, &account1)]); + db.store_for_tests((1, [(&key1, &account1)].as_slice())); db.print_accounts_stats("pre"); let slots: HashSet = vec![1].into_iter().collect(); let purge_keys = [(key1, slots)]; - let _ = db.purge_keys_exact(purge_keys.iter()); + let _ = db.purge_keys_exact(purge_keys); let account2 = AccountSharedData::new(3, 0, &key); - db.store_for_tests(2, &[(&key1, &account2)]); + db.store_for_tests((2, [(&key1, &account2)].as_slice())); db.print_accounts_stats("post"); let ancestors = vec![(2, 0)].into_iter().collect(); @@ -1832,7 +2039,7 @@ fn test_store_large_account() { let data_len = DEFAULT_FILE_SIZE as usize + 7; let account = AccountSharedData::new(1, data_len, &key); - db.store_for_tests(0, &[(&key, &account)]); + db.store_for_tests((0, [(&key, &account)].as_slice())); let ancestors = vec![(0, 0)].into_iter().collect(); let ret = db.load_without_fixed_root(&ancestors, &key).unwrap(); @@ -1876,10 +2083,6 @@ fn test_stored_readable_account() { assert!(accounts_equal(&account, &stored_account)); } -/// A place holder stored size for a cached entry. We don't need to store the size for cached entries, but we have to pass something. -/// stored size is only used for shrinking. We don't shrink items in the write cache. -const CACHE_VIRTUAL_STORED_SIZE: StoredSize = 0; - #[test] fn test_hash_stored_account() { // Number are just sequential. 
@@ -1911,7 +2114,7 @@ fn test_hash_stored_account() { account_meta: &account_meta, data: &data, offset, - stored_size: CACHE_VIRTUAL_STORED_SIZE as usize, + stored_size: 0, }; let account = stored_account.to_account_shared_data(); @@ -1952,7 +2155,7 @@ fn test_verify_bank_capitalization() { let account = AccountSharedData::new(1, some_data_len, &key); let ancestors = vec![(some_slot, 0)].into_iter().collect(); - db.store_for_tests(some_slot, &[(&key, &account)]); + db.store_for_tests((some_slot, [(&key, &account)].as_slice())); if pass == 0 { db.add_root_and_flush_write_cache(some_slot); @@ -1964,13 +2167,14 @@ fn test_verify_bank_capitalization() { } let native_account_pubkey = solana_pubkey::new_rand(); - db.store_for_tests( + db.store_for_tests(( some_slot, - &[( + [( &native_account_pubkey, &create_loadable_account_for_test("foo"), - )], - ); + )] + .as_slice(), + )); db.add_root_and_flush_write_cache(some_slot); assert_eq!( @@ -1992,7 +2196,7 @@ fn test_storage_finder() { let account = AccountSharedData::new(lamports, data_len, &solana_pubkey::new_rand()); // pre-populate with a smaller empty store db.create_and_insert_store(1, 8192, "test_storage_finder"); - db.store_for_tests(1, &[(&key, &account)]); + db.store_for_tests((1, [(&key, &account)].as_slice())); } #[test] @@ -2011,7 +2215,7 @@ fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() { let base_slot = before_slot + 1; let after_slot = base_slot + 1; - db.store_for_tests(base_slot, &[(&key, &account)]); + db.store_for_tests((base_slot, [(&key, &account)].as_slice())); db.add_root_and_flush_write_cache(base_slot); assert!(db.get_storages(..=before_slot).0.is_empty()); @@ -2029,7 +2233,7 @@ fn test_get_snapshot_storages_only_non_empty() { let base_slot = 0; let after_slot = base_slot + 1; - db.store_for_tests(base_slot, &[(&key, &account)]); + db.store_for_tests((base_slot, [(&key, &account)].as_slice())); if pass == 0 { db.add_root_and_flush_write_cache(base_slot); 
db.storage.remove(&base_slot, false); @@ -2037,7 +2241,7 @@ fn test_get_snapshot_storages_only_non_empty() { continue; } - db.store_for_tests(base_slot, &[(&key, &account)]); + db.store_for_tests((base_slot, [(&key, &account)].as_slice())); db.add_root_and_flush_write_cache(base_slot); assert_eq!(1, db.get_storages(..=after_slot).0.len()); } @@ -2052,7 +2256,7 @@ fn test_get_snapshot_storages_only_roots() { let base_slot = 0; let after_slot = base_slot + 1; - db.store_for_tests(base_slot, &[(&key, &account)]); + db.store_for_tests((base_slot, [(&key, &account)].as_slice())); assert!(db.get_storages(..=after_slot).0.is_empty()); db.add_root_and_flush_write_cache(base_slot); @@ -2068,7 +2272,7 @@ fn test_get_snapshot_storages_exclude_empty() { let base_slot = 0; let after_slot = base_slot + 1; - db.store_for_tests(base_slot, &[(&key, &account)]); + db.store_for_tests((base_slot, [(&key, &account)].as_slice())); db.add_root_and_flush_write_cache(base_slot); assert_eq!(1, db.get_storages(..=after_slot).0.len()); @@ -2087,7 +2291,7 @@ fn test_get_snapshot_storages_with_base_slot() { let account = AccountSharedData::new(1, 0, &key); let slot = 10; - db.store_for_tests(slot, &[(&key, &account)]); + db.store_for_tests((slot, [(&key, &account)].as_slice())); db.add_root_and_flush_write_cache(slot); assert_eq!(0, db.get_storages(slot + 1..=slot + 1).0.len()); assert_eq!(1, db.get_storages(slot..=slot + 1).0.len()); @@ -2095,11 +2299,11 @@ fn test_get_snapshot_storages_with_base_slot() { define_accounts_db_test!( test_storage_remove_account_double_remove, - panic = "double remove of account in slot: 0/store: 0!!", + panic = "Too many bytes or accounts removed from storage! 
slot: 0, id: 0", |accounts| { let pubkey = solana_pubkey::new_rand(); let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); - accounts.store_for_tests(0, &[(&pubkey, &account)]); + accounts.store_for_tests((0, [(&pubkey, &account)].as_slice())); accounts.add_root_and_flush_write_cache(0); let storage_entry = accounts.storage.get_slot_storage_entry(0).unwrap(); storage_entry.remove_accounts(0, 1); @@ -2133,11 +2337,11 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si // A: Initialize AccountsDb with pubkey1 and pubkey2 current_slot += 1; if store1_first { - accounts.store_for_tests(current_slot, &[(&pubkey1, &account)]); - accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]); + accounts.store_for_tests((current_slot, [(&pubkey1, &account)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey2, &account)].as_slice())); } else { - accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]); - accounts.store_for_tests(current_slot, &[(&pubkey1, &account)]); + accounts.store_for_tests((current_slot, [(&pubkey2, &account)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey1, &account)].as_slice())); } accounts.add_root_and_flush_write_cache(current_slot); @@ -2147,14 +2351,14 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si // B: Test multiple updates to pubkey1 in a single slot/storage current_slot += 1; assert_eq!(0, accounts.alive_account_count_in_slot(current_slot)); - assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.store_for_tests(current_slot, &[(&pubkey1, &account2)]); - accounts.store_for_tests(current_slot, &[(&pubkey1, &account2)]); + accounts.assert_ref_count(&pubkey1, 1); + accounts.store_for_tests((current_slot, [(&pubkey1, &account2)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey1, &account2)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); assert_eq!(1, 
accounts.alive_account_count_in_slot(current_slot)); // Stores to same pubkey, same slot only count once towards the // ref count - assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1)); + accounts.assert_ref_count(&pubkey1, 2); accounts.add_root_and_flush_write_cache(current_slot); accounts.print_accounts_stats("Post-B pre-clean"); @@ -2166,12 +2370,12 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si // C: more updates to trigger clean of previous updates current_slot += 1; - assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.store_for_tests(current_slot, &[(&pubkey1, &account3)]); - accounts.store_for_tests(current_slot, &[(&pubkey2, &account3)]); - accounts.store_for_tests(current_slot, &[(&pubkey3, &account4)]); + accounts.assert_ref_count(&pubkey1, 2); + accounts.store_for_tests((current_slot, [(&pubkey1, &account3)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey2, &account3)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey3, &account4)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); - assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); + accounts.assert_ref_count(&pubkey1, 3); info!("post C"); @@ -2179,10 +2383,10 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si // D: Make all keys 0-lamport, cleans all keys current_slot += 1; - assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.store_for_tests(current_slot, &[(&pubkey1, &zero_lamport_account)]); - accounts.store_for_tests(current_slot, &[(&pubkey2, &zero_lamport_account)]); - accounts.store_for_tests(current_slot, &[(&pubkey3, &zero_lamport_account)]); + accounts.assert_ref_count(&pubkey1, 3); + accounts.store_for_tests((current_slot, [(&pubkey1, &zero_lamport_account)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey2, &zero_lamport_account)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey3, 
&zero_lamport_account)].as_slice())); let snapshot_stores = accounts.get_storages(..=current_slot).0; let total_accounts: usize = snapshot_stores.iter().map(|s| s.accounts_count()).sum(); @@ -2201,9 +2405,9 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si assert_eq!(total_accounts, total_accounts_post_clean); // should clean all 3 pubkeys - assert_eq!(accounts.ref_count_for_pubkey(&pubkey1), 0); - assert_eq!(accounts.ref_count_for_pubkey(&pubkey2), 0); - assert_eq!(accounts.ref_count_for_pubkey(&pubkey3), 0); + accounts.assert_ref_count(&pubkey1, 0); + accounts.assert_ref_count(&pubkey2, 0); + accounts.assert_ref_count(&pubkey3, 0); } // Setup 3 scenarios which try to differentiate between pubkey1 being in an @@ -2269,7 +2473,7 @@ fn test_shrink_candidate_slots() { current_slot += 1; for pubkey in &pubkeys { - accounts.store_for_tests(current_slot, &[(pubkey, &account)]); + accounts.store_for_tests((current_slot, [(pubkey, &account)].as_slice())); } let shrink_slot = current_slot; accounts.add_root_and_flush_write_cache(current_slot); @@ -2279,7 +2483,7 @@ fn test_shrink_candidate_slots() { let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { - accounts.store_for_tests(current_slot, &[(pubkey, &account)]); + accounts.store_for_tests((current_slot, [(pubkey, &account)].as_slice())); } accounts.add_root_and_flush_write_cache(current_slot); accounts.clean_accounts_for_tests(); @@ -2335,7 +2539,7 @@ fn test_shrink_candidate_slots_with_dead_ancient_account() { .map(|(pubkey, account)| (pubkey, account)) .collect(); let starting_ancient_slot = 1; - db.store_for_tests(starting_ancient_slot, &accounts); + db.store_for_tests((starting_ancient_slot, accounts.as_slice())); db.add_root_and_flush_write_cache(starting_ancient_slot); let storage = db.get_storage_for_slot(starting_ancient_slot).unwrap(); let ancient_accounts = db.get_unique_accounts_from_storage(&storage); @@ -2354,10 
+2558,10 @@ fn test_shrink_candidate_slots_with_dead_ancient_account() { let ancient_append_vec_offset = db.ancient_append_vec_offset.unwrap().abs(); let current_slot = epoch_schedule.slots_per_epoch + ancient_append_vec_offset as u64 + 1; // Simulate killing of the ancient account by overwriting it in the current slot. - db.store_for_tests( + db.store_for_tests(( current_slot, - &[(&modified_account_pubkey, &modified_account)], - ); + [(&modified_account_pubkey, &modified_account)].as_slice(), + )); db.add_root_and_flush_write_cache(current_slot); // This should remove the dead ancient account from the index. db.clean_accounts_for_tests(); @@ -2405,8 +2609,9 @@ fn test_select_candidates_by_total_usage_no_candidates() { assert_eq!(0, next_candidates.len()); } -#[test] -fn test_select_candidates_by_total_usage_3_way_split_condition() { +#[test_case(StorageAccess::Mmap)] +#[test_case(StorageAccess::File)] +fn test_select_candidates_by_total_usage_3_way_split_condition(storage_access: StorageAccess) { // three candidates, one selected for shrink, one is put back to the candidate list and one is ignored solana_logger::setup(); let mut candidates = ShrinkCandidates::default(); @@ -2422,6 +2627,7 @@ fn test_select_candidates_by_total_usage_3_way_split_condition() { store1_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); db.storage.insert(store1_slot, Arc::clone(&store1)); store1.alive_bytes.store(0, Ordering::Release); @@ -2434,6 +2640,7 @@ fn test_select_candidates_by_total_usage_3_way_split_condition() { store2_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); db.storage.insert(store2_slot, Arc::clone(&store2)); store2 @@ -2448,6 +2655,7 @@ fn test_select_candidates_by_total_usage_3_way_split_condition() { store3_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); db.storage.insert(store3_slot, Arc::clone(&store3)); store3 @@ -2468,8 
+2676,9 @@ fn test_select_candidates_by_total_usage_3_way_split_condition() { assert!(next_candidates.contains(&store2_slot)); } -#[test] -fn test_select_candidates_by_total_usage_2_way_split_condition() { +#[test_case(StorageAccess::Mmap)] +#[test_case(StorageAccess::File)] +fn test_select_candidates_by_total_usage_2_way_split_condition(storage_access: StorageAccess) { // three candidates, 2 are selected for shrink, one is ignored solana_logger::setup(); let db = AccountsDb::new_single_for_tests(); @@ -2485,6 +2694,7 @@ fn test_select_candidates_by_total_usage_2_way_split_condition() { store1_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); db.storage.insert(store1_slot, Arc::clone(&store1)); store1.alive_bytes.store(0, Ordering::Release); @@ -2497,6 +2707,7 @@ fn test_select_candidates_by_total_usage_2_way_split_condition() { store2_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); db.storage.insert(store2_slot, Arc::clone(&store2)); store2 @@ -2511,6 +2722,7 @@ fn test_select_candidates_by_total_usage_2_way_split_condition() { store3_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); db.storage.insert(store3_slot, Arc::clone(&store3)); store3 @@ -2528,8 +2740,9 @@ fn test_select_candidates_by_total_usage_2_way_split_condition() { assert_eq!(0, next_candidates.len()); } -#[test] -fn test_select_candidates_by_total_usage_all_clean() { +#[test_case(StorageAccess::Mmap)] +#[test_case(StorageAccess::File)] +fn test_select_candidates_by_total_usage_all_clean(storage_access: StorageAccess) { // 2 candidates, they must be selected to achieve the target alive ratio solana_logger::setup(); let db = AccountsDb::new_single_for_tests(); @@ -2545,6 +2758,7 @@ fn test_select_candidates_by_total_usage_all_clean() { store1_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); 
db.storage.insert(store1_slot, Arc::clone(&store1)); store1 @@ -2559,6 +2773,7 @@ fn test_select_candidates_by_total_usage_all_clean() { store2_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); db.storage.insert(store2_slot, Arc::clone(&store2)); store2 @@ -2576,8 +2791,6 @@ fn test_select_candidates_by_total_usage_all_clean() { assert_eq!(0, next_candidates.len()); } -const UPSERT_POPULATE_RECLAIMS: UpsertReclaim = UpsertReclaim::PopulateReclaims; - #[test] fn test_delete_dependencies() { solana_logger::setup(); @@ -2589,7 +2802,7 @@ fn test_delete_dependencies() { let info1 = AccountInfo::new(StorageLocation::AppendVec(1, 0), true); let info2 = AccountInfo::new(StorageLocation::AppendVec(2, 0), true); let info3 = AccountInfo::new(StorageLocation::AppendVec(3, 0), true); - let mut reclaims = vec![]; + let mut reclaims = ReclaimsSlotList::new(); accounts_index.upsert( 0, 0, @@ -2598,7 +2811,7 @@ fn test_delete_dependencies() { &AccountSecondaryIndexes::default(), info0, &mut reclaims, - UPSERT_POPULATE_RECLAIMS, + UpsertReclaim::IgnoreReclaims, ); accounts_index.upsert( 1, @@ -2608,7 +2821,7 @@ fn test_delete_dependencies() { &AccountSecondaryIndexes::default(), info1, &mut reclaims, - UPSERT_POPULATE_RECLAIMS, + UpsertReclaim::IgnoreReclaims, ); accounts_index.upsert( 1, @@ -2618,7 +2831,7 @@ fn test_delete_dependencies() { &AccountSecondaryIndexes::default(), info1, &mut reclaims, - UPSERT_POPULATE_RECLAIMS, + UpsertReclaim::IgnoreReclaims, ); accounts_index.upsert( 2, @@ -2628,7 +2841,7 @@ fn test_delete_dependencies() { &AccountSecondaryIndexes::default(), info2, &mut reclaims, - UPSERT_POPULATE_RECLAIMS, + UpsertReclaim::IgnoreReclaims, ); accounts_index.upsert( 2, @@ -2638,7 +2851,7 @@ fn test_delete_dependencies() { &AccountSecondaryIndexes::default(), info2, &mut reclaims, - UPSERT_POPULATE_RECLAIMS, + UpsertReclaim::IgnoreReclaims, ); accounts_index.upsert( 3, @@ -2648,7 +2861,7 @@ fn test_delete_dependencies() 
{ &AccountSecondaryIndexes::default(), info3, &mut reclaims, - UPSERT_POPULATE_RECLAIMS, + UpsertReclaim::IgnoreReclaims, ); accounts_index.add_root(0); accounts_index.add_root(1); @@ -2735,7 +2948,7 @@ fn test_store_overhead() { let accounts = AccountsDb::new_single_for_tests(); let account = AccountSharedData::default(); let pubkey = solana_pubkey::new_rand(); - accounts.store_for_tests(0, &[(&pubkey, &account)]); + accounts.store_for_tests((0, [(&pubkey, &account)].as_slice())); accounts.add_root_and_flush_write_cache(0); let store = accounts.storage.get_slot_storage_entry(0).unwrap(); let total_len = store.accounts.len(); @@ -2751,13 +2964,13 @@ fn test_store_clean_after_shrink() { let account = AccountSharedData::new(1, 16 * 4096, &Pubkey::default()); let pubkey1 = solana_pubkey::new_rand(); - accounts.store_cached((0, &[(&pubkey1, &account)][..])); + accounts.store_for_tests((0, &[(&pubkey1, &account)][..])); let pubkey2 = solana_pubkey::new_rand(); - accounts.store_cached((0, &[(&pubkey2, &account)][..])); + accounts.store_for_tests((0, &[(&pubkey2, &account)][..])); let zero_account = AccountSharedData::new(0, 1, &Pubkey::default()); - accounts.store_cached((1, &[(&pubkey1, &zero_account)][..])); + accounts.store_for_tests((1, &[(&pubkey1, &zero_account)][..])); // Add root 0 and flush separately accounts.add_root(0); @@ -2782,7 +2995,7 @@ fn test_store_clean_after_shrink() { accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post-clean"); - assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0); + accounts.assert_ref_count(&pubkey1, 0); } #[test] @@ -2799,7 +3012,7 @@ fn test_wrapping_storage_id() { // write unique keys to successive slots keys.iter().enumerate().for_each(|(slot, key)| { let slot = slot as Slot; - db.store_for_tests(slot, &[(key, &zero_lamport_account)]); + db.store_for_tests((slot, [(key, &zero_lamport_account)].as_slice())); db.add_root_and_flush_write_cache(slot); }); assert_eq!(slots - 1, 
db.next_id.load(Ordering::Acquire)); @@ -2824,7 +3037,7 @@ fn test_reuse_storage_id() { // write unique keys to successive slots keys.iter().enumerate().for_each(|(slot, key)| { let slot = slot as Slot; - db.store_for_tests(slot, &[(key, &zero_lamport_account)]); + db.store_for_tests((slot, [(key, &zero_lamport_account)].as_slice())); db.add_root_and_flush_write_cache(slot); // reset next_id to what it was previously to cause us to re-use the same id db.next_id.store(AccountsFileId::MAX, Ordering::Release); @@ -2842,8 +3055,8 @@ fn test_zero_lamport_new_root_not_cleaned() { let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); // Store zero lamport account into slots 0 and 1, root both slots - db.store_for_tests(0, &[(&account_key, &zero_lamport_account)]); - db.store_for_tests(1, &[(&account_key, &zero_lamport_account)]); + db.store_for_tests((0, [(&account_key, &zero_lamport_account)].as_slice())); + db.store_for_tests((1, [(&account_key, &zero_lamport_account)].as_slice())); db.add_root_and_flush_write_cache(0); db.add_root_and_flush_write_cache(1); @@ -2863,7 +3076,7 @@ fn test_store_load_cached() { let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); let slot = 0; - db.store_cached((slot, &[(&key, &account0)][..])); + db.store_for_tests((slot, &[(&key, &account0)][..])); // Load with no ancestors and no root will return nothing assert!(db @@ -2895,7 +3108,7 @@ fn test_store_flush_load_cached() { let key = Pubkey::default(); let account0 = AccountSharedData::new(1, 0, &key); let slot = 0; - db.store_cached((slot, &[(&key, &account0)][..])); + db.store_for_tests((slot, &[(&key, &account0)][..])); db.mark_slot_frozen(slot); // No root was added yet, requires an ancestor to find @@ -2927,9 +3140,9 @@ fn test_flush_accounts_cache() { let unrooted_key = solana_pubkey::new_rand(); let key5 = solana_pubkey::new_rand(); let key6 = solana_pubkey::new_rand(); - db.store_cached((unrooted_slot, 
&[(&unrooted_key, &account0)][..])); - db.store_cached((root5, &[(&key5, &account0)][..])); - db.store_cached((root6, &[(&key6, &account0)][..])); + db.store_for_tests((unrooted_slot, &[(&unrooted_key, &account0)][..])); + db.store_for_tests((root5, &[(&key5, &account0)][..])); + db.store_for_tests((root6, &[(&key6, &account0)][..])); for slot in &[unrooted_slot, root5, root6] { db.mark_slot_frozen(*slot); } @@ -2988,7 +3201,7 @@ fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize let num_slots = 2 * max_cache_slots(); for i in 0..num_roots + num_unrooted { let key = Pubkey::new_unique(); - db.store_cached((i as Slot, &[(&key, &account0)][..])); + db.store_for_tests((i as Slot, &[(&key, &account0)][..])); keys.push(key); db.mark_slot_frozen(i as Slot); if i < num_roots { @@ -3041,8 +3254,8 @@ fn test_read_only_accounts_cache() { let account_key = Pubkey::new_unique(); let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner()); - db.store_cached((0, &[(&account_key, &zero_lamport_account)][..])); - db.store_cached((1, &[(&account_key, &slot1_account)][..])); + db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..])); + db.store_for_tests((1, &[(&account_key, &slot1_account)][..])); db.add_root(0); db.add_root(1); @@ -3064,7 +3277,7 @@ fn test_read_only_accounts_cache() { .unwrap(); assert_eq!(account.lamports(), 1); assert_eq!(db.read_only_accounts_cache.cache_len(), 1); - db.store_cached((2, &[(&account_key, &zero_lamport_account)][..])); + db.store_for_tests((2, &[(&account_key, &zero_lamport_account)][..])); assert_eq!(db.read_only_accounts_cache.cache_len(), 1); let account = db .load_with_fixed_root(&Ancestors::default(), &account_key) @@ -3080,8 +3293,8 @@ fn test_load_with_read_only_accounts_cache() { let account_key = Pubkey::new_unique(); let zero_lamport_account = AccountSharedData::new(0, 0, 
AccountSharedData::default().owner()); let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner()); - db.store_cached((0, &[(&account_key, &zero_lamport_account)][..])); - db.store_cached((1, &[(&account_key, &slot1_account)][..])); + db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..])); + db.store_for_tests((1, &[(&account_key, &slot1_account)][..])); db.add_root(0); db.add_root(1); @@ -3092,43 +3305,43 @@ fn test_load_with_read_only_accounts_cache() { assert_eq!(db.read_only_accounts_cache.cache_len(), 0); let (account, slot) = db - .load_account_with(&Ancestors::default(), &account_key, |_| false) + .load_account_with(&Ancestors::default(), &account_key, false) .unwrap(); assert_eq!(account.lamports(), 1); assert_eq!(db.read_only_accounts_cache.cache_len(), 0); assert_eq!(slot, 1); let (account, slot) = db - .load_account_with(&Ancestors::default(), &account_key, |_| true) + .load_account_with(&Ancestors::default(), &account_key, true) .unwrap(); assert_eq!(account.lamports(), 1); assert_eq!(db.read_only_accounts_cache.cache_len(), 1); assert_eq!(slot, 1); - db.store_cached((2, &[(&account_key, &zero_lamport_account)][..])); - let account = db.load_account_with(&Ancestors::default(), &account_key, |_| false); + db.store_for_tests((2, &[(&account_key, &zero_lamport_account)][..])); + let account = db.load_account_with(&Ancestors::default(), &account_key, false); assert!(account.is_none()); assert_eq!(db.read_only_accounts_cache.cache_len(), 1); db.read_only_accounts_cache.reset_for_tests(); assert_eq!(db.read_only_accounts_cache.cache_len(), 0); - let account = db.load_account_with(&Ancestors::default(), &account_key, |_| true); + let account = db.load_account_with(&Ancestors::default(), &account_key, true); assert!(account.is_none()); assert_eq!(db.read_only_accounts_cache.cache_len(), 0); let slot2_account = AccountSharedData::new(2, 1, AccountSharedData::default().owner()); - db.store_cached((2, &[(&account_key, 
&slot2_account)][..])); + db.store_for_tests((2, &[(&account_key, &slot2_account)][..])); let (account, slot) = db - .load_account_with(&Ancestors::default(), &account_key, |_| false) + .load_account_with(&Ancestors::default(), &account_key, false) .unwrap(); assert_eq!(account.lamports(), 2); assert_eq!(db.read_only_accounts_cache.cache_len(), 0); assert_eq!(slot, 2); let slot2_account = AccountSharedData::new(2, 1, AccountSharedData::default().owner()); - db.store_cached((2, &[(&account_key, &slot2_account)][..])); + db.store_for_tests((2, &[(&account_key, &slot2_account)][..])); let (account, slot) = db - .load_account_with(&Ancestors::default(), &account_key, |_| true) + .load_account_with(&Ancestors::default(), &account_key, true) .unwrap(); assert_eq!(account.lamports(), 2); // The account shouldn't be added to read_only_cache because it is in write_cache. @@ -3136,100 +3349,6 @@ fn test_load_with_read_only_accounts_cache() { assert_eq!(slot, 2); } -#[test] -fn test_account_matches_owners() { - let db = Arc::new(AccountsDb::new_single_for_tests()); - - let owners: Vec = (0..2).map(|_| Pubkey::new_unique()).collect(); - - let account1_key = Pubkey::new_unique(); - let account1 = AccountSharedData::new(321, 10, &owners[0]); - - let account2_key = Pubkey::new_unique(); - let account2 = AccountSharedData::new(1, 1, &owners[1]); - - let account3_key = Pubkey::new_unique(); - let account3 = AccountSharedData::new(1, 1, &Pubkey::new_unique()); - - // Account with 0 lamports - let account4_key = Pubkey::new_unique(); - let account4 = AccountSharedData::new(0, 1, &owners[1]); - - db.store_cached((0, &[(&account1_key, &account1)][..])); - db.store_cached((1, &[(&account2_key, &account2)][..])); - db.store_cached((2, &[(&account3_key, &account3)][..])); - db.store_cached((3, &[(&account4_key, &account4)][..])); - - db.add_root(0); - db.add_root(1); - db.add_root(2); - db.add_root(3); - - // Set the latest full snapshot slot to one that is *older* than the slot account4 
is in. - // This is required to ensure account4 is not purged during `clean`, - // which is required to have account_matches_owners() return NoMatch. - db.set_latest_full_snapshot_slot(2); - - // Flush the cache so that the account meta will be read from the storage - db.flush_accounts_cache(true, None); - db.clean_accounts_for_tests(); - - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account1_key, &owners), - Ok(0) - ); - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account2_key, &owners), - Ok(1) - ); - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account3_key, &owners), - Err(MatchAccountOwnerError::NoMatch) - ); - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account4_key, &owners), - Err(MatchAccountOwnerError::NoMatch) - ); - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &Pubkey::new_unique(), &owners), - Err(MatchAccountOwnerError::UnableToLoad) - ); - - // Flush the cache and load account1 (so that it's in the cache) - db.flush_accounts_cache(true, None); - db.clean_accounts_for_tests(); - let _ = db - .do_load( - &Ancestors::default(), - &account1_key, - Some(0), - LoadHint::Unspecified, - LoadZeroLamports::SomeWithZeroLamportAccountForTests, - ) - .unwrap(); - - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account1_key, &owners), - Ok(0) - ); - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account2_key, &owners), - Ok(1) - ); - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account3_key, &owners), - Err(MatchAccountOwnerError::NoMatch) - ); - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &account4_key, &owners), - Err(MatchAccountOwnerError::NoMatch) - ); - assert_eq!( - db.account_matches_owners(&Ancestors::default(), &Pubkey::new_unique(), &owners), - Err(MatchAccountOwnerError::UnableToLoad) - ); -} - /// a test that will accept either answer const LOAD_ZERO_LAMPORTS_ANY_TESTS: 
LoadZeroLamports = LoadZeroLamports::None; @@ -3240,8 +3359,8 @@ fn test_flush_cache_clean() { let account_key = Pubkey::new_unique(); let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner()); - db.store_cached((0, &[(&account_key, &zero_lamport_account)][..])); - db.store_cached((1, &[(&account_key, &slot1_account)][..])); + db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..])); + db.store_for_tests((1, &[(&account_key, &slot1_account)][..])); db.add_root(0); db.add_root(1); @@ -3276,9 +3395,22 @@ fn test_flush_cache_clean() { .is_none()); } -#[test] -fn test_flush_cache_dont_clean_zero_lamport_account() { - let db = Arc::new(AccountsDb::new_single_for_tests()); +#[test_case(MarkObsoleteAccounts::Enabled)] +#[test_case(MarkObsoleteAccounts::Disabled)] +fn test_flush_cache_dont_clean_zero_lamport_account(mark_obsolete_accounts: MarkObsoleteAccounts) { + let db = AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + mark_obsolete_accounts, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ); + + // If there is no latest full snapshot, zero lamport accounts can be cleaned and removed + // immediately. 
Set latest full snapshot slot to zero to avoid cleaning zero lamport accounts + db.set_latest_full_snapshot_slot(0); let zero_lamport_account_key = Pubkey::new_unique(); let other_account_key = Pubkey::new_unique(); @@ -3289,24 +3421,24 @@ fn test_flush_cache_dont_clean_zero_lamport_account() { let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); // Store into slot 0, and then flush the slot to storage - db.store_cached((0, &[(&zero_lamport_account_key, &slot0_account)][..])); + db.store_for_tests((0, &[(&zero_lamport_account_key, &slot0_account)][..])); // Second key keeps other lamport account entry for slot 0 alive, // preventing clean of the zero_lamport_account in slot 1. - db.store_cached((0, &[(&other_account_key, &slot0_account)][..])); + db.store_for_tests((0, &[(&other_account_key, &slot0_account)][..])); db.add_root(0); db.flush_accounts_cache(true, None); assert!(db.storage.get_slot_storage_entry(0).is_some()); // Store into slot 1, a dummy slot that will be dead and purged before flush - db.store_cached((1, &[(&zero_lamport_account_key, &zero_lamport_account)][..])); + db.store_for_tests((1, &[(&zero_lamport_account_key, &zero_lamport_account)][..])); // Store into slot 2, which makes all updates from slot 1 outdated. // This means slot 1 is a dead slot. Later, slot 1 will be cleaned/purged - // before it even reaches storage, but this purge of slot 1should not affect + // before it even reaches storage, but this purge of slot 1 should not affect // the refcount of `zero_lamport_account_key` because cached keys do not bump // the refcount in the index. 
This means clean should *not* remove // `zero_lamport_account_key` from slot 2 - db.store_cached((2, &[(&zero_lamport_account_key, &zero_lamport_account)][..])); + db.store_for_tests((2, &[(&zero_lamport_account_key, &zero_lamport_account)][..])); db.add_root(1); db.add_root(2); @@ -3315,20 +3447,20 @@ fn test_flush_cache_dont_clean_zero_lamport_account() { db.flush_accounts_cache(true, None); db.clean_accounts_for_tests(); - // The `zero_lamport_account_key` is still alive in slot 1, so refcount for the + // The `zero_lamport_account_key` is still alive in slot 0, so refcount for the // pubkey should be 2 - assert_eq!( - db.accounts_index - .ref_count_from_storage(&zero_lamport_account_key), - 2 - ); - assert_eq!( - db.accounts_index.ref_count_from_storage(&other_account_key), - 1 - ); + if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled { + db.assert_ref_count(&zero_lamport_account_key, 2); + } else { + // However, if obsolete accounts are enabled, it will only be alive in slot 2 + db.assert_ref_count(&zero_lamport_account_key, 1); + } + db.assert_ref_count(&other_account_key, 1); // The zero-lamport account in slot 2 should not be purged yet, because the - // entry in slot 1 is blocking cleanup of the zero-lamport account. + // entry in slot 0 is blocking cleanup of the zero-lamport account. 
+ // With obsolete accounts enabled, the zero lamport account being newer + // than the latest full snapshot blocks cleanup let max_root = None; // Fine to simulate a transaction load since we are not doing any out of band // removals, only using clean_accounts @@ -3358,7 +3490,7 @@ fn test_flush_cache_populates_uncleaned_pubkeys() { let account = AccountSharedData::new(10, 0, &Pubkey::default()); // storing accounts doesn't add anything to uncleaned_pubkeys - accounts_db.store_cached((slot, [(pubkey, account)].as_slice())); + accounts_db.store_for_tests((slot, [(pubkey, account)].as_slice())); assert_eq!(accounts_db.get_len_of_slots_with_uncleaned_pubkeys(), 0); // ...but ensure that rooting and flushing the write cache does @@ -3443,11 +3575,11 @@ fn test_scan_flush_accounts_cache_then_clean_drop() { / \ 1 2 (root) */ - db.store_cached((0, &[(&account_key, &zero_lamport_account)][..])); - db.store_cached((1, &[(&account_key, &slot1_account)][..])); + db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..])); + db.store_for_tests((1, &[(&account_key, &slot1_account)][..])); // Fodder for the scan so that the lock on `account_key` is not held - db.store_cached((1, &[(&account_key2, &slot1_account)][..])); - db.store_cached((2, &[(&account_key, &slot2_account)][..])); + db.store_for_tests((1, &[(&account_key2, &slot1_account)][..])); + db.store_for_tests((2, &[(&account_key, &slot2_account)][..])); let max_scan_root = 0; db.add_root(max_scan_root); @@ -3541,7 +3673,7 @@ define_accounts_db_test!(test_alive_bytes, |accounts_db| { for data_size in 0..num_keys { let account = AccountSharedData::new(1, data_size, &Pubkey::default()); - accounts_db.store_cached((slot, &[(&Pubkey::new_unique(), &account)][..])); + accounts_db.store_for_tests((slot, &[(&Pubkey::new_unique(), &account)][..])); } accounts_db.add_root(slot); @@ -3600,7 +3732,7 @@ define_accounts_db_test!( let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); let key 
= Pubkey::new_unique(); - accounts_db.store_cached((slot, &[(&key, &zero_account)][..])); + accounts_db.store_for_tests((slot, &[(&key, &zero_account)][..])); pubkeys.push(key); } @@ -3668,7 +3800,7 @@ fn setup_accounts_db_cache_clean( .take(num_slots) .collect(); if scan_slot.is_some() { - accounts_db.store_cached( + accounts_db.store_for_tests( // Store it in a slot that isn't returned in `slots` ( stall_slot, @@ -3685,7 +3817,7 @@ fn setup_accounts_db_cache_clean( for slot in &slots { for key in &keys[*slot as usize..] { let space = 1; // 1 byte allows us to track by size - accounts_db.store_cached(( + accounts_db.store_for_tests(( *slot, &[(key, &AccountSharedData::new(1, space, &Pubkey::default()))][..], )); @@ -3728,7 +3860,7 @@ fn test_accounts_db_cache_clean_dead_slots() { slots.push(alive_slot); for key in &keys { // Store a slot that overwrites all previous keys, rendering all previous keys dead - accounts_db.store_cached(( + accounts_db.store_for_tests(( alive_slot, &[(key, &AccountSharedData::new(1, 0, &Pubkey::default()))][..], )); @@ -4027,15 +4159,15 @@ fn test_shrink_unref() { let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); // Store into slot 0 - db.store_for_tests(0, &[(&account_key1, &account1)]); - db.store_for_tests(0, &[(&account_key2, &account1)]); + db.store_for_tests((0, [(&account_key1, &account1)].as_slice())); + db.store_for_tests((0, [(&account_key2, &account1)].as_slice())); db.add_root(0); // Make account_key1 in slot 0 outdated by updating in rooted slot 1 - db.store_cached((1, &[(&account_key1, &account1)][..])); + db.store_for_tests((1, &[(&account_key1, &account1)][..])); db.add_root(1); - // Flushes all roots - db.flush_accounts_cache(true, None); + // Flush without cleaning to avoid reclaiming account_key1 early + db.flush_rooted_accounts_cache(None, false); // Clean to remove outdated entry from slot 0 db.clean_accounts(Some(1), false, &EpochSchedule::default()); @@ -4048,11 +4180,11 @@ fn 
test_shrink_unref() { db.shrink_candidate_slots(&epoch_schedule); // Make slot 0 dead by updating the remaining key - db.store_cached((2, &[(&account_key2, &account1)][..])); + db.store_for_tests((2, &[(&account_key2, &account1)][..])); db.add_root(2); - // Flushes all roots - db.flush_accounts_cache(true, None); + // Flush without cleaning to avoid reclaiming account_key2 early + db.flush_rooted_accounts_cache(None, false); // Should be one store before clean for slot 0 db.get_and_assert_single_storage(0); @@ -4064,7 +4196,7 @@ fn test_shrink_unref() { // Ref count for `account_key1` (account removed earlier by shrink) // should be 1, since it was only stored in slot 0 and 1, and slot 0 // is now dead - assert_eq!(db.accounts_index.ref_count_from_storage(&account_key1), 1); + db.assert_ref_count(&account_key1, 1); } #[test] @@ -4078,12 +4210,12 @@ fn test_clean_drop_dead_zero_lamport_single_ref_accounts() { // slot 0 - stored a 1-lamport account let slot = 0; - accounts_db.store_cached((slot, &[(&key1, &one_account)][..])); + accounts_db.store_for_tests((slot, &[(&key1, &one_account)][..])); accounts_db.add_root(slot); // slot 1 - store a 0 -lamport account let slot = 1; - accounts_db.store_cached((slot, &[(&key1, &zero_account)][..])); + accounts_db.store_for_tests((slot, &[(&key1, &zero_account)][..])); accounts_db.add_root(slot); accounts_db.flush_accounts_cache(true, None); @@ -4106,13 +4238,13 @@ fn test_clean_drop_dead_storage_handle_zero_lamport_single_ref_accounts() { let account0 = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); // Store into slot 0 - db.store_for_tests(0, &[(&account_key1, &account1)]); + db.store_for_tests((0, [(&account_key1, &account1)].as_slice())); db.add_root_and_flush_write_cache(0); // Make account_key1 in slot 0 outdated by updating in rooted slot 1 with a zero lamport account // And store one additional live account to make the store still alive after clean. 
- db.store_cached((1, &[(&account_key1, &account0)][..])); - db.store_cached((1, &[(&account_key2, &account1)][..])); + db.store_for_tests((1, &[(&account_key1, &account0)][..])); + db.store_for_tests((1, &[(&account_key2, &account1)][..])); db.add_root(1); // Flushes all roots db.flush_accounts_cache(true, None); @@ -4128,7 +4260,7 @@ fn test_clean_drop_dead_storage_handle_zero_lamport_single_ref_accounts() { // has one other alive account, it is not completely dead. So it won't // be a candidate for "clean" to drop. Instead, it becomes a candidate // for next round shrinking. - assert_eq!(db.accounts_index.ref_count_from_storage(&account_key1), 1); + db.assert_ref_count(&account_key1, 1); assert_eq!( db.get_and_assert_single_storage(1) .num_zero_lamport_single_ref_accounts(), @@ -4137,6 +4269,9 @@ fn test_clean_drop_dead_storage_handle_zero_lamport_single_ref_accounts() { assert!(db.shrink_candidate_slots.lock().unwrap().contains(&1)); } +/// Tests that shrink correctly marks newly single ref zero lamport accounts and sends them to clean +/// This test is still relevant with obsolete accounts enabled, but can be removed if all +/// scenarios where flush_write_cache doesn't clean are eliminated. 
#[test] fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() { let db = AccountsDb::new_single_for_tests(); @@ -4147,15 +4282,15 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() { let account0 = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); // Store into slot 0 - db.store_for_tests(0, &[(&account_key1, &account1)]); - db.store_for_tests(0, &[(&account_key2, &account1)]); + db.store_for_tests((0, [(&account_key1, &account1)].as_slice())); + db.store_for_tests((0, [(&account_key2, &account1)].as_slice())); db.add_root_and_flush_write_cache(0); // Make account_key1 in slot 0 outdated by updating in rooted slot 1 with a zero lamport account - db.store_cached((1, &[(&account_key1, &account0)][..])); + db.store_for_tests((1, &[(&account_key1, &account0)][..])); db.add_root(1); - // Flushes all roots - db.flush_accounts_cache(true, None); + // Flushes all roots without clean + db.flush_rooted_accounts_cache(None, false); // Clean to remove outdated entry from slot 0 db.clean_accounts(Some(1), false, &EpochSchedule::default()); @@ -4169,7 +4304,7 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() { // After shrink slot 0, check that the zero_lamport account on slot 1 // should be marked since it become singe_ref. 
- assert_eq!(db.accounts_index.ref_count_from_storage(&account_key1), 1); + db.assert_ref_count(&account_key1, 1); assert_eq!( db.get_and_assert_single_storage(1) .num_zero_lamport_single_ref_accounts(), @@ -4182,23 +4317,24 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() { assert!(!db.shrink_candidate_slots.lock().unwrap().contains(&1)); // Now, make slot 0 dead by updating the remaining key - db.store_cached((2, &[(&account_key2, &account1)][..])); + db.store_for_tests((2, &[(&account_key2, &account1)][..])); db.add_root(2); // Flushes all roots db.flush_accounts_cache(true, None); - // Should be one store before clean for slot 0 and slot 1 - db.get_and_assert_single_storage(0); + // Should be one store before clean for slot 1 db.get_and_assert_single_storage(1); db.clean_accounts(Some(2), false, &EpochSchedule::default()); - // No stores should exist for slot 0 after clean + // No stores should exist for slot 0. If obsolete accounts are enabled, slot 0 stores are + // cleaned when slot 2 is flushed. If obsolete accounts are disabled, slot 0 stores are + // cleaned during the clean_accounts function call. assert_no_storages_at_slot(&db, 0); // No store should exit for slot 1 too as it has only a zero lamport single ref account. assert_no_storages_at_slot(&db, 1); // Store 2 should have a single account. 
- assert_eq!(db.accounts_index.ref_count_from_storage(&account_key2), 1); + db.assert_ref_count(&account_key2, 1); db.get_and_assert_single_storage(2); } @@ -4211,8 +4347,11 @@ define_accounts_db_test!(test_partial_clean, |db| { let account4 = AccountSharedData::new(4, 0, AccountSharedData::default().owner()); // Store accounts into slots 0 and 1 - db.store_for_tests(0, &[(&account_key1, &account1), (&account_key2, &account1)]); - db.store_for_tests(1, &[(&account_key1, &account2)]); + db.store_for_tests(( + 0, + [(&account_key1, &account1), (&account_key2, &account1)].as_slice(), + )); + db.store_for_tests((1, [(&account_key1, &account2)].as_slice())); db.print_accounts_stats("pre-clean1"); @@ -4233,7 +4372,7 @@ db.add_root_and_flush_write_cache(0); // store into slot 2 - db.store_for_tests(2, &[(&account_key2, &account3), (&account_key1, &account3)]); + db.store_for_tests(( + 2, + [(&account_key2, &account3), (&account_key1, &account3)].as_slice(), + )); db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean2"); @@ -4243,7 +4385,7 @@ db.print_accounts_stats("post-clean3"); - db.store_for_tests(3, &[(&account_key2, &account4)]); + db.store_for_tests((3, [(&account_key2, &account4)].as_slice())); db.add_root_and_flush_write_cache(3); // Check that we can clean where max_root=3 and slot=2 is not rooted @@ -4282,7 +4424,7 @@ fn start_load_thread( return; } // Meddle load_limit to cover all branches of implementation. - // There should absolutely no behaviorial difference; the load_limit triggered + // There should absolutely be no behavioral difference; the load_limit triggered // slow branch should only affect the performance. // Ordering::Relaxed is ok because of no data dependencies; the modified field is // completely free-standing cfg(test) control-flow knob. 
@@ -4318,7 +4460,7 @@ fn test_load_account_and_cache_flush_race() { let db = Arc::new(db); let pubkey = Arc::new(Pubkey::new_unique()); let exit = Arc::new(AtomicBool::new(false)); - db.store_cached(( + db.store_for_tests(( 0, &[( pubkey.as_ref(), @@ -4342,7 +4484,7 @@ fn test_load_account_and_cache_flush_race() { return; } account.set_lamports(slot + 1); - db.store_cached((slot, &[(pubkey.as_ref(), &account)][..])); + db.store_for_tests((slot, &[(pubkey.as_ref(), &account)][..])); db.add_root(slot); sleep(Duration::from_millis(RACY_SLEEP_MS)); db.flush_accounts_cache(true, None); @@ -4380,7 +4522,7 @@ fn do_test_load_account_and_shrink_race(with_retry: bool) { let lamports = 42; let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner()); account.set_lamports(lamports); - db.store_for_tests(slot, &[(&pubkey, &account)]); + db.store_for_tests((slot, [(pubkey.as_ref(), &account)].as_slice())); // Set the slot as a root so account loads will see the contents of this slot db.add_root(slot); @@ -4478,7 +4620,7 @@ fn test_cache_flush_delayed_remove_unrooted_race() { let num_trials = 10; for _ in 0..num_trials { let pubkey = Pubkey::new_unique(); - db.store_cached((slot, &[(&pubkey, &account)][..])); + db.store_for_tests((slot, &[(&pubkey, &account)][..])); // Wait for both threads to finish flush_trial_start_sender.send(()).unwrap(); remove_trial_start_sender.send(()).unwrap(); @@ -4562,7 +4704,7 @@ fn test_cache_flush_remove_unrooted_race_multiple_slots() { let slot_to_pubkey_map: HashMap = (0..num_cached_slots) .map(|slot| { let pubkey = Pubkey::new_unique(); - db.store_cached((slot, &[(&pubkey, &account)][..])); + db.store_for_tests((slot, &[(&pubkey, &account)][..])); (slot, pubkey) }) .collect(); @@ -4667,9 +4809,9 @@ fn test_remove_uncleaned_slots_and_collect_pubkeys_up_to_slot() { let account2 = AccountSharedData::new(0, 0, &pubkey2); let account3 = AccountSharedData::new(0, 0, &pubkey3); - db.store_for_tests(slot1, &[(&pubkey1, 
&account1)]); - db.store_for_tests(slot2, &[(&pubkey2, &account2)]); - db.store_for_tests(slot3, &[(&pubkey3, &account3)]); + db.store_for_tests((slot1, [(&pubkey1, &account1)].as_slice())); + db.store_for_tests((slot2, [(&pubkey2, &account2)].as_slice())); + db.store_for_tests((slot3, [(&pubkey3, &account3)].as_slice())); // slot 1 is _not_ a root on purpose db.add_root(slot2); @@ -4696,8 +4838,9 @@ fn test_remove_uncleaned_slots_and_collect_pubkeys_up_to_slot() { assert!(candidates_contain(&pubkey3)); } -#[test] -fn test_shrink_productive() { +#[test_case(StorageAccess::Mmap)] +#[test_case(StorageAccess::File)] +fn test_shrink_productive(storage_access: StorageAccess) { solana_logger::setup(); let path = Path::new(""); let file_size = 100; @@ -4709,6 +4852,7 @@ fn test_shrink_productive() { slot as AccountsFileId, file_size, AccountsFileProvider::AppendVec, + storage_access, )); store.add_account(file_size as usize); assert!(!AccountsDb::is_shrinking_productive(&store)); @@ -4719,6 +4863,7 @@ fn test_shrink_productive() { slot as AccountsFileId, file_size, AccountsFileProvider::AppendVec, + storage_access, )); store.add_account(file_size as usize / 2); store.add_account(file_size as usize / 4); @@ -4729,8 +4874,9 @@ fn test_shrink_productive() { assert!(!AccountsDb::is_shrinking_productive(&store)); } -#[test] -fn test_is_candidate_for_shrink() { +#[test_case(StorageAccess::Mmap)] +#[test_case(StorageAccess::File)] +fn test_is_candidate_for_shrink(storage_access: StorageAccess) { solana_logger::setup(); let mut accounts = AccountsDb::new_single_for_tests(); @@ -4742,6 +4888,7 @@ fn test_is_candidate_for_shrink() { 1, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); match accounts.shrink_ratio { AccountShrinkThreshold::TotalSpace { shrink_ratio } => { @@ -4790,7 +4937,8 @@ define_accounts_db_test!(test_calculate_storage_count_and_alive_bytes, |accounts let storage = accounts.storage.get_slot_storage_entry(slot0).unwrap(); let storage_info = 
StorageSizeAndCountMap::default(); - accounts.generate_index_for_slot(&storage, slot0, 0, &storage_info); + let mut reader = append_vec::new_scan_accounts_reader(); + accounts.generate_index_for_slot(&mut reader, &storage, slot0, 0, &storage_info); assert_eq!(storage_info.len(), 1); for entry in storage_info.iter() { let expected_stored_size = @@ -4813,7 +4961,8 @@ define_accounts_db_test!( // empty store let storage = accounts.create_and_insert_store(0, 1, "test"); let storage_info = StorageSizeAndCountMap::default(); - accounts.generate_index_for_slot(&storage, 0, 0, &storage_info); + let mut reader = append_vec::new_scan_accounts_reader(); + accounts.generate_index_for_slot(&mut reader, &storage, 0, 0, &storage_info); assert!(storage_info.is_empty()); } ); @@ -4849,7 +4998,8 @@ define_accounts_db_test!( ); let storage_info = StorageSizeAndCountMap::default(); - accounts.generate_index_for_slot(&storage, 0, 0, &storage_info); + let mut reader = append_vec::new_scan_accounts_reader(); + accounts.generate_index_for_slot(&mut reader, &storage, 0, 0, &storage_info); assert_eq!(storage_info.len(), 1); for entry in storage_info.iter() { let expected_stored_size = @@ -4872,14 +5022,13 @@ define_accounts_db_test!(test_set_storage_count_and_alive_bytes, |accounts| { let shared_key = solana_pubkey::new_rand(); let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner()); let slot0 = 0; - accounts.store_for_tests(slot0, &[(&shared_key, &account)]); + accounts.store_for_tests((slot0, [(&shared_key, &account)].as_slice())); accounts.add_root_and_flush_write_cache(slot0); // fake out the store count to avoid the assert for (_, store) in accounts.storage.iter() { store.alive_bytes.store(0, Ordering::Release); - let mut count_and_status = store.count_and_status.lock_write(); - count_and_status.0 = 0; + store.count.store(0, Ordering::Release); } // count needs to be <= approx stored count in store. 
@@ -4897,14 +5046,14 @@ define_accounts_db_test!(test_set_storage_count_and_alive_bytes, |accounts| { ); for (_, store) in accounts.storage.iter() { - assert_eq!(store.count_and_status.read().0, 0); + assert_eq!(store.count(), 0); assert_eq!(store.alive_bytes(), 0); } accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default()); assert_eq!(accounts.storage.len(), 1); for (_, store) in accounts.storage.iter() { assert_eq!(store.id(), 0); - assert_eq!(store.count_and_status.read().0, count); + assert_eq!(store.count(), count); assert_eq!(store.alive_bytes(), 2); } }); @@ -4919,15 +5068,15 @@ define_accounts_db_test!(test_purge_alive_unrooted_slots_after_clean, |accounts| // Store accounts with greater than 0 lamports let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner()); - accounts.store_for_tests(slot0, &[(&shared_key, &account)]); - accounts.store_for_tests(slot0, &[(&unrooted_key, &account)]); + accounts.store_for_tests((slot0, [(&shared_key, &account)].as_slice())); + accounts.store_for_tests((slot0, [(&unrooted_key, &account)].as_slice())); // Simulate adding dirty pubkeys on bank freeze. 
Note this is // not a rooted slot // On the next *rooted* slot, update the `shared_key` account to zero lamports let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); - accounts.store_for_tests(slot1, &[(&shared_key, &zero_lamport_account)]); + accounts.store_for_tests((slot1, [(&shared_key, &zero_lamport_account)].as_slice())); // Simulate adding dirty pubkeys on bank freeze, set root accounts.add_root_and_flush_write_cache(slot1); @@ -4974,32 +5123,32 @@ define_accounts_db_test!( let slot1: Slot = 1; let account = AccountSharedData::new(111, space, &owner); - accounts_db.store_cached((slot1, &[(&pubkey, &account)][..])); + accounts_db.store_for_tests((slot1, &[(&pubkey, &account)][..])); accounts_db.add_root_and_flush_write_cache(slot1); let slot2: Slot = 2; let account = AccountSharedData::new(222, space, &owner); - accounts_db.store_cached((slot2, &[(&pubkey, &account)][..])); + accounts_db.store_for_tests((slot2, &[(&pubkey, &account)][..])); accounts_db.add_root_and_flush_write_cache(slot2); let slot3: Slot = 3; let account = AccountSharedData::new(0, space, &owner); - accounts_db.store_cached((slot3, &[(&pubkey, &account)][..])); + accounts_db.store_for_tests((slot3, &[(&pubkey, &account)][..])); accounts_db.add_root_and_flush_write_cache(slot3); - assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 3); + accounts_db.assert_ref_count(&pubkey, 3); accounts_db.set_latest_full_snapshot_slot(slot2); accounts_db.clean_accounts(Some(slot2), false, &EpochSchedule::default()); - assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 2); + accounts_db.assert_ref_count(&pubkey, 2); accounts_db.set_latest_full_snapshot_slot(slot2); accounts_db.clean_accounts(None, false, &EpochSchedule::default()); - assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 1); + accounts_db.assert_ref_count(&pubkey, 1); accounts_db.set_latest_full_snapshot_slot(slot3); accounts_db.clean_accounts(None, false, &EpochSchedule::default()); - 
assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 0); + accounts_db.assert_ref_count(&pubkey, 0); } ); @@ -5026,7 +5175,7 @@ fn test_filter_zero_lamport_clean_for_incremental_snapshots() { candidates[0].insert( pubkey, CleaningInfo { - slot_list: vec![(slot, account_info)], + slot_list: SlotList::from([(slot, account_info)]), ref_count: 1, ..Default::default() }, @@ -5176,7 +5325,7 @@ fn test_unref_pubkeys_removed_from_accounts_index() { let db = AccountsDb::new_single_for_tests(); let mut purged_slot_pubkeys = HashSet::default(); purged_slot_pubkeys.insert((slot1, pk1)); - let mut reclaims = SlotList::default(); + let mut reclaims = ReclaimsSlotList::default(); db.accounts_index.upsert( slot1, slot1, @@ -5199,8 +5348,8 @@ fn test_unref_pubkeys_removed_from_accounts_index() { vec![(pk1, vec![slot1].into_iter().collect::>())], purged_stored_account_slots.into_iter().collect::>() ); - let expected = u64::from(already_removed); - assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), expected); + let expected = RefCount::from(already_removed); + db.assert_ref_count(&pk1, expected); } } @@ -5230,7 +5379,7 @@ fn test_unref_accounts() { let db = AccountsDb::new_single_for_tests(); let mut purged_slot_pubkeys = HashSet::default(); purged_slot_pubkeys.insert((slot1, pk1)); - let mut reclaims = SlotList::default(); + let mut reclaims = ReclaimsSlotList::default(); db.accounts_index.upsert( slot1, slot1, @@ -5253,13 +5402,13 @@ fn test_unref_accounts() { vec![(pk1, vec![slot1].into_iter().collect::>())], purged_stored_account_slots.into_iter().collect::>() ); - assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), 0); + db.assert_ref_count(&pk1, 0); } { let db = AccountsDb::new_single_for_tests(); let mut purged_stored_account_slots = AccountSlots::default(); let mut purged_slot_pubkeys = HashSet::default(); - let mut reclaims = SlotList::default(); + let mut reclaims = ReclaimsSlotList::default(); // pk1 and pk2 both in slot1 and slot2, so each has refcount of 
2 for slot in [slot1, slot2] { for pk in [pk1, pk2] { @@ -5291,15 +5440,15 @@ fn test_unref_accounts() { assert_eq!(result, slots.into_iter().collect::>()); } assert!(purged_stored_account_slots.is_empty()); - assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), 0); - assert_eq!(db.accounts_index.ref_count_from_storage(&pk2), 1); + db.assert_ref_count(&pk1, 0); + db.assert_ref_count(&pk2, 1); } } } define_accounts_db_test!(test_many_unrefs, |db| { let mut purged_stored_account_slots = AccountSlots::default(); - let mut reclaims = SlotList::default(); + let mut reclaims = ReclaimsSlotList::default(); let pk1 = Pubkey::from([1; 32]); // make sure we have > 1 batch. Bigger numbers cost more in test time here. let n = (UNREF_ACCOUNTS_BATCH_SIZE + 1) as Slot; @@ -5320,7 +5469,10 @@ define_accounts_db_test!(test_many_unrefs, |db| { }) .collect::>(); - assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), n); + assert_eq!( + db.accounts_index.ref_count_from_storage(&pk1), + n as RefCount, + ); // unref all 'n' slots db.unref_accounts( purged_slot_pubkeys, @@ -5411,10 +5563,10 @@ fn test_sweep_get_oldest_non_ancient_slot_max() { ] { let db = AccountsDb::new_with_config( Vec::new(), - Some(AccountsDbConfig { + AccountsDbConfig { ancient_append_vec_offset: Some(ancient_append_vec_offset as i64), ..ACCOUNTS_DB_CONFIG_FOR_TESTING - }), + }, None, Arc::default(), ); @@ -5442,10 +5594,10 @@ fn test_sweep_get_oldest_non_ancient_slot() { let ancient_append_vec_offset = 50_000; let db = AccountsDb::new_with_config( Vec::new(), - Some(AccountsDbConfig { + AccountsDbConfig { ancient_append_vec_offset: Some(ancient_append_vec_offset), ..ACCOUNTS_DB_CONFIG_FOR_TESTING - }), + }, None, Arc::default(), ); @@ -5494,10 +5646,10 @@ fn test_sweep_get_oldest_non_ancient_slot2() { for starting_slot_offset in [0, avoid_saturation] { let db = AccountsDb::new_with_config( Vec::new(), - Some(AccountsDbConfig { + AccountsDbConfig { ancient_append_vec_offset: 
Some(ancient_append_vec_offset), ..ACCOUNTS_DB_CONFIG_FOR_TESTING - }), + }, None, Arc::default(), ); @@ -5653,7 +5805,7 @@ fn test_shrink_collect_simple() { account.set_lamports(u64::from(old_lamports == 0)); } - db.store_for_tests(slot5, &[(pubkey, &account)]); + db.store_for_tests((slot5, [(pubkey, &account)].as_slice())); account.set_lamports(old_lamports); let mut alive = alive; if append_opposite_alive_account @@ -5671,8 +5823,8 @@ fn test_shrink_collect_simple() { to_purge.iter().for_each(|pubkey| { db.accounts_index.purge_exact( pubkey, - &([slot5].into_iter().collect::>()), - &mut Vec::default(), + [slot5].into_iter().collect::>(), + &mut ReclaimsSlotList::new(), ); }); @@ -5821,7 +5973,7 @@ fn test_shrink_collect_with_obsolete_accounts() { account.set_lamports(200); regular_pubkeys.push(*pubkey); } - db.store_for_tests(slot, &[(pubkey, &account)]); + db.store_for_tests((slot, [(pubkey, &account)].as_slice())); } // Flush the cache @@ -5847,8 +5999,8 @@ fn test_shrink_collect_with_obsolete_accounts() { // Purge accounts via clean and ensure that they will be unreffed. 
db.accounts_index.purge_exact( pubkey, - &([slot].into_iter().collect::>()), - &mut Vec::default(), + [slot].into_iter().collect::>(), + &mut ReclaimsSlotList::new(), ); unref_pubkeys.push(*pubkey); } @@ -5907,12 +6059,13 @@ fn test_combine_ancient_slots_simple() { fn get_all_accounts_from_storages<'a>( storages: impl Iterator>, ) -> Vec<(Pubkey, AccountSharedData)> { + let mut reader = append_vec::new_scan_accounts_reader(); storages .flat_map(|storage| { let mut vec = Vec::default(); storage .accounts - .scan_accounts(|_offset, account| { + .scan_accounts(&mut reader, |_offset, account| { vec.push((*account.pubkey(), account.to_account_shared_data())); }) .expect("must scan accounts storage"); @@ -5985,147 +6138,6 @@ pub(crate) fn compare_all_accounts( ); } -#[test] -fn test_shrink_ancient_overflow_with_min_size() { - solana_logger::setup(); - - let ideal_av_size = ancient_append_vecs::get_ancient_append_vec_capacity(); - let num_normal_slots = 2; - - // build an ancient append vec at slot 'ancient_slot' with one `fat` - // account that's larger than the ideal size of ancient append vec to - // simulate the *oversized* append vec for shrinking. - let account_size = (1.5 * ideal_av_size as f64) as u64; - let (db, ancient_slot) = get_one_ancient_append_vec_and_others_with_account_size( - num_normal_slots, - Some(account_size), - ); - - let max_slot_inclusive = ancient_slot + (num_normal_slots as Slot); - let initial_accounts = get_all_accounts(&db, ancient_slot..(max_slot_inclusive + 1)); - - let ancient = db.storage.get_slot_storage_entry(ancient_slot).unwrap(); - - // assert that the min_size, which about 1.5 * ideal_av_size, kicked in - // and result that the ancient append vec capacity exceeds the ideal_av_size - assert!(ancient.capacity() > ideal_av_size); - - // combine 1 normal append vec into existing oversize ancient append vec. 
- db.combine_ancient_slots_packed( - (ancient_slot..max_slot_inclusive).collect(), - CAN_RANDOMLY_SHRINK_FALSE, - ); - - compare_all_accounts( - &initial_accounts, - &get_all_accounts(&db, ancient_slot..max_slot_inclusive), - ); - - // the append vec at max_slot_inclusive-1 should NOT have been removed - // since the append vec is already oversized and we created an ancient - // append vec there. - let ancient2 = db - .storage - .get_slot_storage_entry(max_slot_inclusive - 1) - .unwrap(); - assert!(ancient2.capacity() > ideal_av_size); // min_size kicked in, which cause the appendvec to be larger than the ideal_av_size - - // Combine normal append vec(s) into existing ancient append vec this - // will overflow the original ancient append vec because of the oversized - // ancient append vec is full. - db.combine_ancient_slots_packed( - (ancient_slot..=max_slot_inclusive).collect(), - CAN_RANDOMLY_SHRINK_FALSE, - ); - - compare_all_accounts( - &initial_accounts, - &get_all_accounts(&db, ancient_slot..(max_slot_inclusive + 1)), - ); - - // Nothing should be combined because the append vec are oversized. - // min_size kicked in, which cause the appendvecs to be larger than the ideal_av_size. 
- let ancient = db.storage.get_slot_storage_entry(ancient_slot).unwrap(); - assert!(ancient.capacity() > ideal_av_size); - - let ancient2 = db - .storage - .get_slot_storage_entry(max_slot_inclusive - 1) - .unwrap(); - assert!(ancient2.capacity() > ideal_av_size); - - let ancient3 = db - .storage - .get_slot_storage_entry(max_slot_inclusive) - .unwrap(); - assert!(ancient3.capacity() > ideal_av_size); -} - -#[test] -fn test_shrink_overflow_too_much() { - let num_normal_slots = 2; - let ideal_av_size = ancient_append_vecs::get_ancient_append_vec_capacity(); - let fat_account_size = (1.5 * ideal_av_size as f64) as u64; - - // Prepare 3 append vecs to combine [small, big, small] - let account_data_sizes = vec![100, fat_account_size, 100]; - let (db, slot1) = create_db_with_storages_and_index_with_customized_account_size_per_slot( - true, - num_normal_slots + 1, - account_data_sizes, - ); - let storage = db.get_storage_for_slot(slot1).unwrap(); - let created_accounts = db.get_unique_accounts_from_storage(&storage); - - // Adjust alive_ratio for slot2 to test it is shrinkable and is a - // candidate for squashing into the previous ancient append vec. - // However, due to the fact that this append vec is `oversized`, it can't - // be squashed into the ancient append vec at previous slot (exceeds the - // size limit). Therefore, a new "oversized" ancient append vec is - // created at slot2 as the overflow. This is where the "min_bytes" in - // `fn create_ancient_append_vec` is used. - let slot2 = slot1 + 1; - let storage2 = db.storage.get_slot_storage_entry(slot2).unwrap(); - let original_cap_slot2 = storage2.accounts.capacity(); - storage2 - .accounts - .set_current_len_for_tests(original_cap_slot2 as usize); - - // Combine append vec into ancient append vec. 
- let slots_to_combine: Vec = (slot1..slot1 + (num_normal_slots + 1) as Slot).collect(); - db.combine_ancient_slots_packed(slots_to_combine, CAN_RANDOMLY_SHRINK_FALSE); - - // slot2 is too big to fit into ideal ancient append vec at slot1. So slot2 won't be merged into slot1. - // slot1 will have its own ancient append vec. - assert!(db.storage.get_slot_storage_entry(slot1).is_some()); - let ancient = db.get_storage_for_slot(slot1).unwrap(); - assert!(ancient.capacity() <= ideal_av_size); - - let after_store = db.get_storage_for_slot(slot1).unwrap(); - let GetUniqueAccountsResult { - stored_accounts: after_stored_accounts, - capacity: after_capacity, - .. - } = db.get_unique_accounts_from_storage(&after_store); - assert!(created_accounts.capacity <= after_capacity); - assert_eq!(created_accounts.stored_accounts.len(), 1); - assert_eq!(after_stored_accounts.len(), 1); - - // slot2, even after shrinking, is still oversized. Therefore, slot 2 - // exists as an ancient append vec. - let storage2_after = db.storage.get_slot_storage_entry(slot2).unwrap(); - assert!(storage2_after.capacity() > ideal_av_size); - let after_store = db.get_storage_for_slot(slot2).unwrap(); - let GetUniqueAccountsResult { - stored_accounts: after_stored_accounts, - capacity: after_capacity, - .. 
- } = db.get_unique_accounts_from_storage(&after_store); - assert!(created_accounts.capacity <= after_capacity); - assert_eq!(created_accounts.stored_accounts.len(), 1); - assert_eq!(after_stored_accounts.len(), 1); -} - pub fn get_account_from_account_from_storage( account: &AccountFromStorage, db: &AccountsDb, @@ -6158,7 +6170,7 @@ fn populate_index(db: &AccountsDb, slots: Range) { &account, &AccountSecondaryIndexes::default(), info, - &mut Vec::default(), + &mut ReclaimsSlotList::new(), UpsertReclaim::IgnoreReclaims, ); }) @@ -6171,53 +6183,6 @@ pub(crate) fn remove_account_for_tests(storage: &AccountStorageEntry, num_bytes: storage.remove_accounts(num_bytes, 1); } -pub(crate) fn create_storages_and_update_index_with_customized_account_size_per_slot( - db: &AccountsDb, - tf: Option<&TempFile>, - starting_slot: Slot, - num_slots: usize, - alive: bool, - account_data_sizes: Vec, -) { - if num_slots == 0 { - return; - } - assert!(account_data_sizes.len() == num_slots); - let local_tf = (tf.is_none()).then(|| { - crate::append_vec::test_utils::get_append_vec_path("create_storages_and_update_index") - }); - let tf = tf.unwrap_or_else(|| local_tf.as_ref().unwrap()); - - let starting_id = db - .storage - .iter() - .map(|storage| storage.1.id()) - .max() - .unwrap_or(999); - for (i, account_data_size) in account_data_sizes.iter().enumerate().take(num_slots) { - let id = starting_id + (i as AccountsFileId); - let pubkey1 = solana_pubkey::new_rand(); - let storage = sample_storage_with_entries_id_fill_percentage( - tf, - starting_slot + (i as Slot), - &pubkey1, - id, - alive, - Some(*account_data_size), - 50, - ); - insert_store(db, Arc::clone(&storage)); - } - - let storage = db.get_storage_for_slot(starting_slot).unwrap(); - let created_accounts = db.get_unique_accounts_from_storage(&storage); - assert_eq!(created_accounts.stored_accounts.len(), 1); - - if alive { - populate_index(db, starting_slot..(starting_slot + (num_slots as Slot) + 1)); - } -} - pub(crate) fn 
create_storages_and_update_index( db: &AccountsDb, tf: Option<&TempFile>, @@ -6251,6 +6216,7 @@ pub(crate) fn create_storages_and_update_index( id, alive, account_data_size, + db.storage_access(), ); insert_store(db, Arc::clone(&storage)); } @@ -6285,34 +6251,6 @@ pub(crate) fn create_db_with_storages_and_index( (db, slot1) } -pub(crate) fn create_db_with_storages_and_index_with_customized_account_size_per_slot( - alive: bool, - num_slots: usize, - account_data_size: Vec, -) -> (AccountsDb, Slot) { - solana_logger::setup(); - - let db = AccountsDb::new_single_for_tests(); - - // create a single append vec with a single account in a slot - // add the pubkey to index if alive - // call combine_ancient_slots with the slot - // verify we create an ancient appendvec that has alive accounts and does not have dead accounts - - let slot1 = 1; - create_storages_and_update_index_with_customized_account_size_per_slot( - &db, - None, - slot1, - num_slots, - alive, - account_data_size, - ); - - let slot1 = slot1 as Slot; - (db, slot1) -} - fn get_one_ancient_append_vec_and_others_with_account_size( num_normal_slots: usize, account_data_size: Option, @@ -6365,9 +6303,10 @@ fn insert_store(db: &AccountsDb, append_vec: Arc) { db.storage.insert(append_vec.slot(), append_vec); } -#[test] +#[test_case(StorageAccess::Mmap)] +#[test_case(StorageAccess::File)] #[should_panic(expected = "self.storage.remove")] -fn test_handle_dropped_roots_for_ancient_assert() { +fn test_handle_dropped_roots_for_ancient_assert(storage_access: StorageAccess) { solana_logger::setup(); let common_store_path = Path::new(""); let store_file_size = 10_000; @@ -6377,6 +6316,7 @@ fn test_handle_dropped_roots_for_ancient_assert() { 1, store_file_size, AccountsFileProvider::AppendVec, + storage_access, )); let db = AccountsDb::new_single_for_tests(); let slot0 = 0; @@ -6392,7 +6332,17 @@ fn test_handle_dropped_roots_for_ancient_assert() { /// `clean`. 
In this case, `clean` should still reclaim the old versions of these accounts. #[test] fn test_clean_old_storages_with_reclaims_rooted() { - let accounts_db = AccountsDb::new_single_for_tests(); + // Test is testing clean behaviour that is specific to obsolete accounts disabled + // Only run in obsolete accounts disabled mode + let accounts_db = AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + mark_obsolete_accounts: MarkObsoleteAccounts::Disabled, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ); let pubkey = Pubkey::new_unique(); let old_slot = 11; let new_slot = 22; @@ -6401,10 +6351,10 @@ fn test_clean_old_storages_with_reclaims_rooted() { let account = AccountSharedData::new(slot, 0, &Pubkey::new_unique()); // store `pubkey` into multiple slots, and also store another unique pubkey // to prevent the whole storage from being marked as dead by `clean`. - accounts_db.store_for_tests( + accounts_db.store_for_tests(( slot, - &[(&pubkey, &account), (&Pubkey::new_unique(), &account)], - ); + [(&pubkey, &account), (&Pubkey::new_unique(), &account)].as_slice(), + )); accounts_db.add_root_and_flush_write_cache(slot); accounts_db.uncleaned_pubkeys.remove(&slot); // ensure this slot is *not* in the dirty_stores nor uncleaned_pubkeys, because we want to @@ -6461,10 +6411,10 @@ fn test_clean_old_storages_with_reclaims_unrooted() { let account = AccountSharedData::new(slot, 0, &Pubkey::new_unique()); // store `pubkey` into multiple slots, and also store another unique pubkey // to prevent the whole storage from being marked as dead by `clean`. 
- accounts_db.store_for_tests( + accounts_db.store_for_tests(( slot, - &[(&pubkey, &account), (&Pubkey::new_unique(), &account)], - ); + [(&pubkey, &account), (&Pubkey::new_unique(), &account)].as_slice(), + )); } // only `old_slot` should be rooted, not `new_slot` @@ -6505,20 +6455,22 @@ fn test_clean_old_storages_with_reclaims_unrooted() { #[test] fn test_calculate_capitalization_simple() { let accounts_db = AccountsDb::new_single_for_tests(); - accounts_db.store_for_tests( + accounts_db.store_for_tests(( 0, - &[( + [( &Pubkey::new_unique(), &AccountSharedData::new(123, 0, &Pubkey::default()), - )], - ); - accounts_db.store_for_tests( + )] + .as_slice(), + )); + accounts_db.store_for_tests(( 1, - &[( + [( &Pubkey::new_unique(), &AccountSharedData::new(456, 0, &Pubkey::default()), - )], - ); + )] + .as_slice(), + )); assert_eq!( accounts_db.calculate_capitalization_at_startup_from_index(&Ancestors::from(vec![0, 1]), 1), 123 + 456, @@ -6532,8 +6484,8 @@ fn test_calculate_capitalization_simple() { fn test_calculate_capitalization_overflow_intra_slot() { let accounts_db = AccountsDb::new_single_for_tests(); let account = AccountSharedData::new(u64::MAX - 1, 0, &Pubkey::default()); - accounts_db.store_for_tests(0, &[(&Pubkey::new_unique(), &account)]); - accounts_db.store_for_tests(0, &[(&Pubkey::new_unique(), &account)]); + accounts_db.store_for_tests((0, [(&Pubkey::new_unique(), &account)].as_slice())); + accounts_db.store_for_tests((0, [(&Pubkey::new_unique(), &account)].as_slice())); accounts_db.calculate_capitalization_at_startup_from_index(&Ancestors::from(vec![0]), 0); } @@ -6544,8 +6496,8 @@ fn test_calculate_capitalization_overflow_intra_slot() { fn test_calculate_capitalization_overflow_inter_slot() { let accounts_db = AccountsDb::new_single_for_tests(); let account = AccountSharedData::new(u64::MAX - 1, 0, &Pubkey::default()); - accounts_db.store_for_tests(0, &[(&Pubkey::new_unique(), &account)]); - accounts_db.store_for_tests(1, &[(&Pubkey::new_unique(), 
&account)]); + accounts_db.store_for_tests((0, [(&Pubkey::new_unique(), &account)].as_slice())); + accounts_db.store_for_tests((1, [(&Pubkey::new_unique(), &account)].as_slice())); accounts_db.calculate_capitalization_at_startup_from_index(&Ancestors::from(vec![0, 1]), 1); } @@ -6576,11 +6528,11 @@ fn test_mark_obsolete_accounts_at_startup_purge_slot() { // Store the same pubkey in multiple slots // Store other pubkey in slot0 to ensure slot is not purged - accounts_db.store_for_tests(0, &[(&pubkey1, &account), (&pubkey2, &account)]); + accounts_db.store_for_tests((0, [(&pubkey1, &account), (&pubkey2, &account)].as_slice())); accounts_db.flush_accounts_cache_slot_for_tests(0); - accounts_db.store_for_tests(1, &[(&pubkey1, &account)]); + accounts_db.store_for_tests((1, [(&pubkey1, &account)].as_slice())); accounts_db.flush_accounts_cache_slot_for_tests(1); - accounts_db.store_for_tests(2, &[(&pubkey1, &account)]); + accounts_db.store_for_tests((2, [(&pubkey1, &account)].as_slice())); accounts_db.flush_accounts_cache_slot_for_tests(2); let pubkeys_with_duplicates_by_bin = vec![vec![pubkey1]]; @@ -6595,10 +6547,7 @@ fn test_mark_obsolete_accounts_at_startup_purge_slot() { assert!(accounts_db.storage.get_slot_storage_entry(1).is_none()); // Verify that the pubkey ref1's count is 1 - assert_eq!( - accounts_db.accounts_index.ref_count_from_storage(&pubkey1), - 1 - ); + accounts_db.assert_ref_count(&pubkey1, 1); assert_eq!(obsolete_stats.accounts_marked_obsolete, 2); } @@ -6612,7 +6561,10 @@ fn test_mark_obsolete_accounts_at_startup_multiple_bins() { let account = AccountSharedData::new(100, 0, &Pubkey::default()); for slot in 0..2 { - accounts_db.store_for_tests(slot, &[(&pubkey1, &account), (&pubkey2, &account)]); + accounts_db.store_for_tests(( + slot, + [(&pubkey1, &account), (&pubkey2, &account)].as_slice(), + )); accounts_db.flush_accounts_cache_slot_for_tests(slot); } @@ -6628,68 +6580,46 @@ fn test_mark_obsolete_accounts_at_startup_multiple_bins() { 
assert!(accounts_db.storage.get_slot_storage_entry(1).is_some()); // Verify that both pubkeys ref_counts are 1 - assert_eq!( - accounts_db.accounts_index.ref_count_from_storage(&pubkey1), - 1 - ); - assert_eq!( - accounts_db.accounts_index.ref_count_from_storage(&pubkey2), - 1 - ); + accounts_db.assert_ref_count(&pubkey1, 1); + accounts_db.assert_ref_count(&pubkey2, 1); // Ensure that stats were accumulated correctly assert_eq!(obsolete_stats.accounts_marked_obsolete, 2); assert_eq!(obsolete_stats.slots_removed, 1); } -// This test verifies that when obsolete accounts are marked, the duplicates lt hash is set to the -// default value. When they are not marked, it is populated. The second case ensures test validity. -#[test_case(true; "mark_obsolete_accounts")] -#[test_case(false; "do_not_mark_obsolete_accounts")] -fn test_obsolete_accounts_empty_default_duplicate_hash(mark_obsolete_accounts: bool) { - let slot0 = 0; - let slot1 = 1; - - let db = AccountsDb::new_with_config( - Vec::new(), - Some(AccountsDbConfig { - mark_obsolete_accounts, - ..ACCOUNTS_DB_CONFIG_FOR_TESTING - }), - None, - Arc::default(), - ); - - let pubkey = Pubkey::new_unique(); - - let storage = db.create_and_insert_store(slot0, 1000, "test"); - - let account0 = AccountSharedData::new(1, 0, &Pubkey::default()); - let account1 = AccountSharedData::new(100, 0, &Pubkey::default()); - - storage - .accounts - .write_accounts(&(slot0, &[(&pubkey, &account0)][..]), 0); - - let storage = db.create_and_insert_store(slot1, 1000, "test"); - - storage - .accounts - .write_accounts(&(slot1, &[(&pubkey, &account1)][..]), 0); - - assert!(!db.accounts_index.contains(&pubkey)); - let result = db.generate_index(None, false); - if mark_obsolete_accounts { - // If obsolete accounts are marked, the duplicates lt hash should be the default value - // This is because all duplicates are marked as obsolete and skipped during lt hash calculation. 
- assert_eq!( - *result.duplicates_lt_hash.unwrap(), - DuplicatesLtHash::default() - ); - } else { - assert_ne!( - *result.duplicates_lt_hash.unwrap(), - DuplicatesLtHash::default() - ); - } +#[test] +fn test_batch_insert_zero_lamport_single_ref_account_offsets() { + let accounts = AccountsDb::new_single_for_tests(); + let storage = accounts.create_and_insert_store(1, 100, "test"); + + // Test inserting new offsets + let offsets1 = vec![10, 20, 30]; + let count1 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets1); + assert_eq!(count1, 3, "Should insert all 3 new offsets"); + assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 3); + + // Test inserting some duplicate and some new offsets + let offsets2 = vec![20, 30, 40, 50]; // 20,30 are duplicates, 40,50 are new + let count2 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets2); + assert_eq!(count2, 2, "Should insert only 2 new offsets (40, 50)"); + assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 5); + + // Test inserting all duplicates + let offsets3 = vec![10, 20]; + let count3 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets3); + assert_eq!(count3, 0, "Should not insert any duplicates"); + assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 5); + + // Test inserting empty slice + let empty_offsets: Vec = vec![]; + let count4 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&empty_offsets); + assert_eq!(count4, 0, "Should handle empty slice"); + assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 5); + + // Test inserting large batch with mixed duplicates + let offsets5 = vec![10, 60, 20, 70, 30, 80, 40]; // 10,20,30,40 duplicates, 60,70,80 new + let count5 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets5); + assert_eq!(count5, 3, "Should insert only 3 new offsets (60, 70, 80)"); + assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 8); } diff --git 
a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs index a6869356bb80cb..e14f9f3a281470 100644 --- a/accounts-db/src/accounts_file.rs +++ b/accounts-db/src/accounts_file.rs @@ -1,18 +1,18 @@ #[cfg(feature = "dev-context-only-utils")] -use crate::append_vec::StoredAccountMeta; +use crate::append_vec::{self, StoredAccountMeta}; use { crate::{ account_info::{AccountInfo, Offset}, account_storage::stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData}, accounts_db::AccountsFileId, - accounts_update_notifier_interface::AccountForGeyser, append_vec::{AppendVec, AppendVecError}, + buffered_reader::RequiredLenBufFileRead, storable_accounts::StorableAccounts, tiered_storage::{ error::TieredStorageError, hot::HOT_FORMAT, index::IndexOffset, TieredStorage, }, }, - solana_account::{AccountSharedData, ReadableAccount as _}, + solana_account::AccountSharedData, solana_clock::Slot, solana_pubkey::Pubkey, std::{ @@ -46,14 +46,6 @@ pub enum AccountsFileError { TieredStorageError(#[from] TieredStorageError), } -#[derive(Error, Debug, PartialEq, Eq)] -pub enum MatchAccountOwnerError { - #[error("The account owner does not match with the provided list")] - NoMatch, - #[error("Unable to load the account")] - UnableToLoad, -} - #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub enum StorageAccess { /// storages should be accessed by Mmap @@ -103,22 +95,10 @@ impl AccountsFile { Ok(Self::AppendVec(av)) } - /// true if this storage can possibly be appended to (independent of capacity check) - // - // NOTE: Only used by ancient append vecs "append" method, which is test-only now. 
- #[cfg(test)] - pub(crate) fn can_append(&self) -> bool { - match self { - Self::AppendVec(av) => av.can_append(), - // once created, tiered storages cannot be appended to - Self::TieredStorage(_) => false, - } - } - /// if storage is not readonly, reopen another instance that is read only pub(crate) fn reopen_as_readonly(&self) -> Option { match self { - Self::AppendVec(av) => av.reopen_as_readonly().map(Self::AppendVec), + Self::AppendVec(av) => av.reopen_as_readonly_file_io().map(Self::AppendVec), Self::TieredStorage(_) => None, } } @@ -132,6 +112,7 @@ impl AccountsFile { } } + /// Flushes contents to disk pub fn flush(&self) -> Result<()> { match self { Self::AppendVec(av) => av.flush(), @@ -139,13 +120,6 @@ impl AccountsFile { } } - pub fn reset(&self) { - match self { - Self::AppendVec(av) => av.reset(), - Self::TieredStorage(_) => {} - } - } - pub fn remaining_bytes(&self) -> u64 { match self { Self::AppendVec(av) => av.remaining_bytes(), @@ -153,6 +127,7 @@ impl AccountsFile { } } + /// Returns the number of bytes, *not accounts*, used in the AccountsFile pub fn len(&self) -> usize { match self { Self::AppendVec(av) => av.len(), @@ -167,6 +142,7 @@ impl AccountsFile { } } + /// Returns the total number of bytes, *not accounts*, the AccountsFile can hold pub fn capacity(&self) -> u64 { match self { Self::AppendVec(av) => av.capacity(), @@ -262,28 +238,6 @@ impl AccountsFile { } } - pub fn account_matches_owners( - &self, - offset: usize, - owners: &[Pubkey], - ) -> std::result::Result { - match self { - Self::AppendVec(av) => av.account_matches_owners(offset, owners), - // Note: The conversion here is needed as the AccountsDB currently - // assumes all offsets are multiple of 8 while TieredStorage uses - // IndexOffset that is equivalent to AccountInfo::reduced_offset. 
- Self::TieredStorage(ts) => { - let Some(reader) = ts.reader() else { - return Err(MatchAccountOwnerError::UnableToLoad); - }; - reader.account_matches_owners( - IndexOffset(AccountInfo::get_reduced_offset(offset)), - owners, - ) - } - } - } - /// Return the path of the underlying account file. pub fn path(&self) -> &Path { match self { @@ -322,12 +276,13 @@ impl AccountsFile { /// /// Prefer scan_accounts_without_data() when account data is not needed, /// as it can potentially read less and be faster. - pub fn scan_accounts( - &self, + pub(crate) fn scan_accounts<'a>( + &'a self, + reader: &mut impl RequiredLenBufFileRead<'a>, callback: impl for<'local> FnMut(Offset, StoredAccountInfo<'local>), ) -> Result<()> { match self { - Self::AppendVec(av) => av.scan_accounts(callback), + Self::AppendVec(av) => av.scan_accounts(reader, callback), Self::TieredStorage(ts) => { if let Some(reader) = ts.reader() { reader.scan_accounts(callback)?; @@ -346,38 +301,20 @@ impl AccountsFile { &self, callback: impl for<'local> FnMut(StoredAccountMeta<'local>), ) -> Result<()> { + let mut reader = append_vec::new_scan_accounts_reader(); match self { - Self::AppendVec(av) => av.scan_accounts_stored_meta(callback), + Self::AppendVec(av) => av.scan_accounts_stored_meta(&mut reader, callback), Self::TieredStorage(_) => { unimplemented!("StoredAccountMeta is only implemented for AppendVec") } } } - /// Iterate over all accounts and call `callback` with each account. - /// Only intended to be used by Geyser. 
- pub fn scan_accounts_for_geyser( - &self, - mut callback: impl for<'local> FnMut(AccountForGeyser<'local>), - ) -> Result<()> { - self.scan_accounts(|_offset, account| { - let account_for_geyser = AccountForGeyser { - pubkey: account.pubkey(), - lamports: account.lamports(), - owner: account.owner(), - executable: account.executable(), - rent_epoch: account.rent_epoch(), - data: account.data(), - }; - callback(account_for_geyser) - }) - } - /// Calculate the amount of storage required for an account with the passed /// in data_len pub(crate) fn calculate_stored_size(&self, data_len: usize) -> usize { match self { - Self::AppendVec(av) => av.calculate_stored_size(data_len), + Self::AppendVec(_) => AppendVec::calculate_stored_size(data_len), Self::TieredStorage(ts) => ts .reader() .expect("Reader must be initialized as stored size is specific to format") @@ -460,11 +397,19 @@ pub enum AccountsFileProvider { } impl AccountsFileProvider { - pub fn new_writable(&self, path: impl Into, file_size: u64) -> AccountsFile { + pub fn new_writable( + &self, + path: impl Into, + file_size: u64, + storage_access: StorageAccess, + ) -> AccountsFile { match self { - Self::AppendVec => { - AccountsFile::AppendVec(AppendVec::new(path, true, file_size as usize)) - } + Self::AppendVec => AccountsFile::AppendVec(AppendVec::new( + path, + true, + file_size as usize, + storage_access, + )), Self::HotStorage => AccountsFile::TieredStorage(TieredStorage::new_writable(path)), } } @@ -487,16 +432,3 @@ pub struct StoredAccountsInfo { /// total size of all the stored accounts pub size: usize, } - -#[cfg(test)] -pub mod tests { - use crate::accounts_file::AccountsFile; - impl AccountsFile { - pub(crate) fn set_current_len_for_tests(&self, len: usize) { - match self { - Self::AppendVec(av) => av.set_current_len_for_tests(len), - Self::TieredStorage(_) => {} - } - } - } -} diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 36abe9c90321c3..ccbb37b8fe5ca9 
100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -24,6 +24,7 @@ use { rayon::iter::{IntoParallelIterator, ParallelIterator}, roots_tracker::RootsTracker, secondary::{RwLockSecondaryIndexEntry, SecondaryIndex, SecondaryIndexEntry}, + smallvec::SmallVec, solana_account::ReadableAccount, solana_clock::{BankId, Slot}, solana_measure::measure::Measure, @@ -35,7 +36,7 @@ use { ops::{Bound, Range, RangeBounds}, path::PathBuf, sync::{ - atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering}, Arc, Mutex, RwLock, }, }, @@ -70,9 +71,16 @@ pub const ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS: AccountsIndexConfig = AccountsIn scan_results_limit_bytes: None, }; pub type ScanResult = Result; -pub type SlotList = Vec<(Slot, T)>; -pub type RefCount = u64; -pub type AtomicRefCount = AtomicU64; +pub type SlotList = SmallVec<[(Slot, T); 1]>; +pub type ReclaimsSlotList = Vec<(Slot, T)>; + +// The ref count cannot be higher than the total number of storages, and we should never have more +// than 1 million storages. A 32-bit ref count should be *significantly* more than enough. +// (We already effectively limit the number of storages to 2^32 since the storage ID type is a u32.) +// The majority of accounts should only exist in one storage, so the most common ref count is '1'. +// Heavily updated accounts should still have a ref count that is < 100. 
+pub type RefCount = u32; +pub type AtomicRefCount = AtomicU32; /// values returned from `insert_new_if_missing_into_primary_index()` #[derive(Default, Debug, PartialEq, Eq)] @@ -122,6 +130,9 @@ pub enum UpsertReclaim { PopulateReclaims, /// overwrite existing data in the same slot and do not return in 'reclaims' IgnoreReclaims, + // Reclaim all older versions of the account from the index and return + // in the 'reclaims' + ReclaimOldSlots, } #[derive(Debug)] @@ -668,21 +679,6 @@ impl + Into> AccountsIndex { } } - #[cfg(feature = "dev-context-only-utils")] - pub fn do_unchecked_scan_accounts( - &self, - metric_name: &'static str, - ancestors: &Ancestors, - func: F, - range: Option, - config: &ScanConfig, - ) where - F: FnMut(&Pubkey, (&T, Slot)), - R: RangeBounds + std::fmt::Debug, - { - self.do_scan_accounts(metric_name, ancestors, func, range, None, config); - } - // Scan accounts and return latest version of each account that is either: // 1) rooted or // 2) present in ancestors @@ -869,15 +865,15 @@ impl + Into> AccountsIndex { #[must_use] pub fn handle_dead_keys( &self, - dead_keys: &[&Pubkey], + dead_keys: &[Pubkey], account_indexes: &AccountSecondaryIndexes, ) -> HashSet { let mut pubkeys_removed_from_accounts_index = HashSet::default(); if !dead_keys.is_empty() { for key in dead_keys.iter() { let w_index = self.get_bin(key); - if w_index.remove_if_slot_list_empty(**key) { - pubkeys_removed_from_accounts_index.insert(**key); + if w_index.remove_if_slot_list_empty(*key) { + pubkeys_removed_from_accounts_index.insert(*key); // Note it's only safe to remove all the entries for this key // because we have the lock for this key's entry in the AccountsIndex, // so no other thread is also updating the index @@ -910,24 +906,6 @@ impl + Into> AccountsIndex { ) } - #[cfg(feature = "dev-context-only-utils")] - pub(crate) fn unchecked_scan_accounts( - &self, - metric_name: &'static str, - ancestors: &Ancestors, - func: F, - config: &ScanConfig, - ) where - F: 
FnMut(&Pubkey, (&T, Slot)), - { - self.do_unchecked_scan_accounts( - metric_name, - ancestors, - func, - None::>, - config, - ); - } /// call func with every pubkey and index visible from a given set of ancestors pub(crate) fn index_scan_accounts( &self, @@ -968,15 +946,12 @@ impl + Into> AccountsIndex { /// returns true if, after this fn call: /// accounts index entry for `pubkey` has an empty slot list /// or `pubkey` does not exist in accounts index - pub(crate) fn purge_exact<'a, C>( - &'a self, + pub(crate) fn purge_exact( + &self, pubkey: &Pubkey, - slots_to_purge: &'a C, - reclaims: &mut SlotList, - ) -> bool - where - C: Contains<'a, Slot>, - { + slots_to_purge: impl for<'a> Contains<'a, Slot>, + reclaims: &mut ReclaimsSlotList, + ) -> bool { self.slot_list_mut(pubkey, |slot_list| { slot_list.retain(|(slot, item)| { let should_purge = slots_to_purge.contains(slot); @@ -1364,6 +1339,7 @@ impl + Into> AccountsIndex { .then_with(|| pubkey_a.cmp(pubkey_b)) }); + let storage = self.storage.storage.as_ref(); while !items.is_empty() { let mut start_index = items.len() - 1; let mut last_pubkey = &items[start_index].0; @@ -1383,7 +1359,7 @@ impl + Into> AccountsIndex { last_pubkey = next_pubkey; } - let r_account_maps = &self.account_maps[pubkey_bin]; + let r_account_maps = self.account_maps[pubkey_bin].as_ref(); // count only considers non-duplicate accounts count += items.len() - start_index; @@ -1395,12 +1371,8 @@ impl + Into> AccountsIndex { // this is no longer the default case let mut duplicates_from_in_memory = vec![]; items.for_each(|(pubkey, account_info)| { - let new_entry = PreAllocatedAccountMapEntry::new( - slot, - account_info, - &self.storage.storage, - use_disk, - ); + let new_entry = + PreAllocatedAccountMapEntry::new(slot, account_info, storage, use_disk); match r_account_maps.insert_new_entry_if_missing_with_lock(pubkey, new_entry) { InsertNewEntryResults::DidNotExist => { num_did_not_exist += 1; @@ -1471,7 +1443,7 @@ impl + Into> AccountsIndex { 
account: &impl ReadableAccount, account_indexes: &AccountSecondaryIndexes, account_info: T, - reclaims: &mut SlotList, + reclaims: &mut ReclaimsSlotList, reclaim: UpsertReclaim, ) { // vast majority of updates are to item already in accounts index, so store as raw to avoid unnecessary allocations @@ -1531,7 +1503,7 @@ impl + Into> AccountsIndex { fn purge_older_root_entries( &self, slot_list: &mut SlotList, - reclaims: &mut SlotList, + reclaims: &mut ReclaimsSlotList, max_clean_root_inclusive: Option, ) { if slot_list.len() <= 1 { @@ -1572,7 +1544,7 @@ impl + Into> AccountsIndex { pub fn clean_rooted_entries( &self, pubkey: &Pubkey, - reclaims: &mut SlotList, + reclaims: &mut ReclaimsSlotList, max_clean_root_inclusive: Option, ) -> bool { let mut is_slot_list_empty = false; @@ -1603,7 +1575,7 @@ impl + Into> AccountsIndex { fn clean_and_unref_slot_list_on_startup( &self, entry: &AccountMapEntry, - reclaims: &mut SlotList, + reclaims: &mut ReclaimsSlotList, ) -> (u64, T) { let mut slot_list = entry.slot_list.write().unwrap(); let max_slot = slot_list @@ -1648,9 +1620,8 @@ impl + Into> AccountsIndex { pub fn clean_and_unref_rooted_entries_by_bin( &self, pubkeys_by_bin: &[Pubkey], - callback: impl Fn(Slot, T), - ) -> SlotList { - let mut reclaims = Vec::new(); + ) -> ReclaimsSlotList { + let mut reclaims = ReclaimsSlotList::new(); let map = match pubkeys_by_bin.first() { Some(pubkey) => self.get_bin(pubkey), @@ -1660,9 +1631,7 @@ impl + Into> AccountsIndex { for pubkey in pubkeys_by_bin { map.get_internal_inner(pubkey, |entry| { let entry = entry.expect("Expected entry to exist in accounts index"); - let (slot, account_info) = - self.clean_and_unref_slot_list_on_startup(entry, &mut reclaims); - callback(slot, account_info); + self.clean_and_unref_slot_list_on_startup(entry, &mut reclaims); (false, ()) }); } @@ -1795,6 +1764,7 @@ pub mod tests { Bound::{Excluded, Included, Unbounded}, RangeInclusive, }, + test_case::test_matrix, }; pub enum SecondaryIndexTypes<'a> { @@ 
-1855,7 +1825,7 @@ pub mod tests { age: AtomicAge::new(entry.age()), }; PreAllocatedAccountMapEntry::Entry(Arc::new(AccountMapEntry::new( - vec![(slot, account_info)], + SlotList::from([(slot, account_info)]), entry.ref_count(), meta, ))) @@ -1875,12 +1845,14 @@ pub mod tests { assert!(!index.contains_with(key, None, None)); let mut num = 0; - index.unchecked_scan_accounts( - "", - &ancestors, - |_pubkey, _index| num += 1, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + |_pubkey, _index| num += 1, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(num, 0); } @@ -1935,7 +1907,7 @@ pub mod tests { fn test_insert_no_ancestors() { let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); index.upsert( 0, 0, @@ -1953,12 +1925,14 @@ pub mod tests { assert!(!index.contains_with(&key, None, None)); let mut num = 0; - index.unchecked_scan_accounts( - "", - &ancestors, - |_pubkey, _index| num += 1, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + |_pubkey, _index| num += 1, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(num, 0); } @@ -2015,22 +1989,26 @@ pub mod tests { assert!(!index.contains_with(pubkey, None, None)); let mut num = 0; - index.unchecked_scan_accounts( - "", - &ancestors, - |_pubkey, _index| num += 1, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + |_pubkey, _index| num += 1, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(num, 0); ancestors.insert(slot, 0); assert!(index.contains_with(pubkey, Some(&ancestors), None)); assert_eq!(index.ref_count_from_storage(pubkey), 1); - index.unchecked_scan_accounts( - "", - &ancestors, - |_pubkey, _index| num += 1, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + |_pubkey, _index| num += 1, + &ScanConfig::default(), + ) + 
.expect("scan should succeed"); assert_eq!(num, 1); // not zero lamports @@ -2048,22 +2026,26 @@ pub mod tests { assert!(!index.contains_with(pubkey, None, None)); let mut num = 0; - index.unchecked_scan_accounts( - "", - &ancestors, - |_pubkey, _index| num += 1, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + |_pubkey, _index| num += 1, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(num, 0); ancestors.insert(slot, 0); assert!(index.contains_with(pubkey, Some(&ancestors), None)); assert_eq!(index.ref_count_from_storage(pubkey), 1); - index.unchecked_scan_accounts( - "", - &ancestors, - |_pubkey, _index| num += 1, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + |_pubkey, _index| num += 1, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(num, 1); } @@ -2091,8 +2073,7 @@ pub mod tests { let index: AccountsIndex = AccountsIndex::::default_for_tests(); let pubkeys_by_bin: Vec = vec![]; - let reclaims = - index.clean_and_unref_rooted_entries_by_bin(&pubkeys_by_bin, |_slot, _info| {}); + let reclaims = index.clean_and_unref_rooted_entries_by_bin(&pubkeys_by_bin); assert!(reclaims.is_empty()); } @@ -2104,7 +2085,7 @@ pub mod tests { let slot = 0; let account_info = true; - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); index.upsert( slot, slot, @@ -2118,10 +2099,7 @@ pub mod tests { assert!(gc.is_empty()); - let reclaims = index.clean_and_unref_rooted_entries_by_bin(&[pubkey], |slot, info| { - assert_eq!(slot, 0); - assert!(info); - }); + let reclaims = index.clean_and_unref_rooted_entries_by_bin(&[pubkey]); assert_eq!(reclaims.len(), 0); } @@ -2135,7 +2113,7 @@ pub mod tests { let account_info1 = 0; let account_info2 = 1; - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); for (slot, account_info) in [(slot1, account_info1), (slot2, account_info2)] { index.upsert( slot, @@ -2151,12 +2129,8 @@ pub mod tests { 
assert!(gc.is_empty()); - let reclaims = index.clean_and_unref_rooted_entries_by_bin(&[pubkey], |slot, info| { - assert_eq!(slot, slot2); - assert_eq!(info, account_info2); - }); - - assert_eq!(reclaims, vec![(slot1, account_info1)]); + let reclaims = index.clean_and_unref_rooted_entries_by_bin(&[pubkey]); + assert_eq!(reclaims, ReclaimsSlotList::from([(slot1, account_info1)])); } #[test] @@ -2164,8 +2138,8 @@ pub mod tests { let index: AccountsIndex = AccountsIndex::::default_for_tests(); let bin_index = 0; let mut pubkeys = Vec::new(); - let mut expected_reclaims = Vec::new(); - let mut gc: Vec<(u64, bool)> = Vec::new(); + let mut expected_reclaims = ReclaimsSlotList::new(); + let mut gc = ReclaimsSlotList::new(); while pubkeys.len() < 10 { let new_pubkey = solana_pubkey::new_rand(); @@ -2198,7 +2172,7 @@ pub mod tests { assert!(gc.is_empty()); - let mut reclaims = index.clean_and_unref_rooted_entries_by_bin(&pubkeys, |_slot, _info| {}); + let mut reclaims = index.clean_and_unref_rooted_entries_by_bin(&pubkeys); reclaims.sort_unstable(); expected_reclaims.sort_unstable(); @@ -2281,10 +2255,10 @@ pub mod tests { fn test_new_entry_code_paths_helper( account_infos: [T; 2], is_cached: bool, - upsert: bool, + upsert_method: Option, use_disk: bool, ) { - if is_cached && !upsert { + if is_cached && upsert_method.is_none() { // This is an illegal combination when we are using queued lazy inserts. // Cached items don't ever leave the in-mem cache. // But the queued lazy insert code relies on there being nothing in the in-mem cache. @@ -2302,27 +2276,30 @@ pub mod tests { IndexLimitMb::InMemOnly // in-mem only }; let index = AccountsIndex::::new(&config, Arc::default()); - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); - if upsert { - // insert first entry for pubkey. This will use new_entry_after_update and not call update. 
- index.upsert( - slot0, - slot0, - &key, - &AccountSharedData::default(), - &AccountSecondaryIndexes::default(), - account_infos[0], - &mut gc, - UPSERT_RECLAIM_TEST_DEFAULT, - ); - } else { - let items = vec![(key, account_infos[0])]; - index.set_startup(Startup::Startup); - let expected_len = items.len(); - let (_, result) = index.insert_new_if_missing_into_primary_index(slot0, items); - assert_eq!(result.count, expected_len); - index.set_startup(Startup::Normal); + match upsert_method { + Some(upsert_method) => { + // insert first entry for pubkey. This will use new_entry_after_update and not call update. + index.upsert( + slot0, + slot0, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + account_infos[0], + &mut gc, + upsert_method, + ); + } + None => { + let items = vec![(key, account_infos[0])]; + index.set_startup(Startup::Startup); + let expected_len = items.len(); + let (_, result) = index.insert_new_if_missing_into_primary_index(slot0, items); + assert_eq!(result.count, expected_len); + index.set_startup(Startup::Normal); + } } assert!(gc.is_empty()); @@ -2330,7 +2307,7 @@ pub mod tests { { let entry = index.get_cloned(&key).unwrap(); let slot_list = entry.slot_list.read().unwrap(); - assert_eq!(entry.ref_count(), u64::from(!is_cached)); + assert_eq!(entry.ref_count(), RefCount::from(!is_cached)); assert_eq!(slot_list.as_slice(), &[(slot0, account_infos[0])]); let new_entry = PreAllocatedAccountMapEntry::new( slot0, @@ -2345,43 +2322,63 @@ pub mod tests { ); } - // insert second entry for pubkey. This will use update and NOT use new_entry_after_update. 
- if upsert { - index.upsert( - slot1, - slot1, - &key, - &AccountSharedData::default(), - &AccountSecondaryIndexes::default(), - account_infos[1], - &mut gc, - UPSERT_RECLAIM_TEST_DEFAULT, - ); - } else { - // this has the effect of aging out everything in the in-mem cache - for _ in 0..5 { + match upsert_method { + Some(upsert_method) => { + // insert second entry for pubkey. This will use update and NOT use new_entry_after_update. + index.upsert( + slot1, + slot1, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + account_infos[1], + &mut gc, + upsert_method, + ); + } + None => { + // this has the effect of aging out everything in the in-mem cache + for _ in 0..5 { + index.set_startup(Startup::Startup); + index.set_startup(Startup::Normal); + } + + let items = vec![(key, account_infos[1])]; index.set_startup(Startup::Startup); + let expected_len = items.len(); + let (_, result) = index.insert_new_if_missing_into_primary_index(slot1, items); + assert_eq!(result.count, expected_len); index.set_startup(Startup::Normal); } + } + + // There should be reclaims if entries are uncached and old slots are being reclaimed + let should_have_reclaims = + upsert_method == Some(UpsertReclaim::ReclaimOldSlots) && !is_cached; - let items = vec![(key, account_infos[1])]; - index.set_startup(Startup::Startup); - let expected_len = items.len(); - let (_, result) = index.insert_new_if_missing_into_primary_index(slot1, items); - assert_eq!(result.count, expected_len); - index.set_startup(Startup::Normal); + if should_have_reclaims { + assert!(!gc.is_empty()); + assert_eq!(gc.len(), 1); + assert_eq!(gc[0], (slot0, account_infos[0])); + } else { + assert!(gc.is_empty()); } - assert!(gc.is_empty()); + index.populate_and_retrieve_duplicate_keys_from_startup(|_slot_keys| {}); let entry = index.get_cloned(&key).unwrap(); let slot_list = entry.slot_list.read().unwrap(); - assert_eq!(entry.ref_count(), if is_cached { 0 } else { 2 }); - assert_eq!( - 
slot_list.as_slice(), - &[(slot0, account_infos[0]), (slot1, account_infos[1])], - ); + if should_have_reclaims { + assert_eq!(entry.ref_count(), 1); + assert_eq!(slot_list.as_slice(), &[(slot1, account_infos[1])],); + } else { + assert_eq!(entry.ref_count(), if is_cached { 0 } else { 2 }); + assert_eq!( + slot_list.as_slice(), + &[(slot0, account_infos[0]), (slot1, account_infos[1])], + ); + } let new_entry = PreAllocatedAccountMapEntry::new( slot1, @@ -2389,19 +2386,26 @@ pub mod tests { &index.storage.storage, false, ); - assert_eq!(slot_list[1], new_entry.into()); - } - #[test] - fn test_new_entry_and_update_code_paths() { - for use_disk in [false, true] { - for is_upsert in &[false, true] { - // account_info type that IS cached - test_new_entry_code_paths_helper([1.0, 2.0], true, *is_upsert, use_disk); + assert_eq!(slot_list.last().unwrap(), &new_entry.into()); + } - // account_info type that is NOT cached - test_new_entry_code_paths_helper([1, 2], false, *is_upsert, use_disk); - } + #[test_matrix( + [false, true], + [None, Some(UpsertReclaim::PopulateReclaims), Some(UpsertReclaim::ReclaimOldSlots)], + [true, false] + )] + fn test_new_entry_and_update_code_paths( + use_disk: bool, + upsert_method: Option, + is_cached: bool, + ) { + if is_cached { + // account_info type that IS cached + test_new_entry_code_paths_helper([1.0, 2.0], true, upsert_method, use_disk); + } else { + // account_info type that is NOT cached + test_new_entry_code_paths_helper([1, 2], false, upsert_method, use_disk); } } @@ -2423,7 +2427,7 @@ pub mod tests { &key, new_entry, None, - &mut SlotList::default(), + &mut ReclaimsSlotList::default(), UPSERT_RECLAIM_TEST_DEFAULT, ); assert_eq!(1, account_maps_stats_len(&index)); @@ -2433,21 +2437,25 @@ pub mod tests { assert!(!index.contains_with(&key, None, None)); let mut num = 0; - index.unchecked_scan_accounts( - "", - &ancestors, - |_pubkey, _index| num += 1, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + 
|_pubkey, _index| num += 1, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(num, 0); ancestors.insert(slot, 0); assert!(index.contains_with(&key, Some(&ancestors), None)); - index.unchecked_scan_accounts( - "", - &ancestors, - |_pubkey, _index| num += 1, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + |_pubkey, _index| num += 1, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(num, 1); } @@ -2455,7 +2463,7 @@ pub mod tests { fn test_insert_wrong_ancestors() { let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); index.upsert( 0, 0, @@ -2472,12 +2480,14 @@ pub mod tests { assert!(!index.contains_with(&key, Some(&ancestors), None)); let mut num = 0; - index.unchecked_scan_accounts( - "", - &ancestors, - |_pubkey, _index| num += 1, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + |_pubkey, _index| num += 1, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(num, 0); } #[test] @@ -2486,7 +2496,7 @@ pub mod tests { // non-cached let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); - let mut reclaims = Vec::new(); + let mut reclaims = ReclaimsSlotList::new(); let slot = 0; let value = 1; assert!(!value.is_cached()); @@ -2532,7 +2542,7 @@ pub mod tests { // cached let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); - let mut reclaims = Vec::new(); + let mut reclaims = ReclaimsSlotList::new(); let slot = 0; let value = 1.0; assert!(value.is_cached()); @@ -2579,7 +2589,7 @@ pub mod tests { fn test_insert_with_ancestors() { let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); index.upsert( 0, 0, @@ -2608,17 +2618,20 @@ pub mod tests { let mut num = 0; let mut found_key = 
false; - index.unchecked_scan_accounts( - "", - &ancestors, - |pubkey, _index| { - if pubkey == &key { - found_key = true - }; - num += 1 - }, - &ScanConfig::default(), - ); + index + .scan_accounts( + &ancestors, + 0, + |pubkey, _index| { + if pubkey == &key { + found_key = true + }; + num += 1 + }, + &ScanConfig::default(), + ) + .expect("scan should succeed"); + assert_eq!(num, 1); assert!(found_key); } @@ -2636,7 +2649,7 @@ pub mod tests { &AccountSharedData::default(), &AccountSecondaryIndexes::default(), true, - &mut vec![], + &mut ReclaimsSlotList::new(), UPSERT_RECLAIM_TEST_DEFAULT, ); new_pubkey @@ -2653,7 +2666,7 @@ pub mod tests { &AccountSharedData::default(), &AccountSecondaryIndexes::default(), true, - &mut vec![], + &mut ReclaimsSlotList::new(), UPSERT_RECLAIM_TEST_DEFAULT, ); } @@ -2665,17 +2678,18 @@ pub mod tests { fn run_test_scan_accounts(num_pubkeys: usize) { let (index, _) = setup_accounts_index_keys(num_pubkeys); - let ancestors = Ancestors::default(); let mut scanned_keys = HashSet::new(); - index.unchecked_scan_accounts( - "", - &ancestors, - |pubkey, _index| { - scanned_keys.insert(*pubkey); - }, - &ScanConfig::default(), - ); + index + .scan_accounts( + &Ancestors::default(), + 0, + |pubkey, _index| { + scanned_keys.insert(*pubkey); + }, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(scanned_keys.len(), num_pubkeys); } @@ -2700,7 +2714,7 @@ pub mod tests { fn test_insert_with_root() { let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); index.upsert( 0, 0, @@ -2748,7 +2762,7 @@ pub mod tests { let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); let ancestors = vec![(0, 0)].into_iter().collect(); - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); index.upsert( 0, 0, @@ -2773,7 +2787,7 @@ pub mod tests { ) .unwrap(); - let mut gc = Vec::new(); + let mut gc = 
ReclaimsSlotList::new(); index.upsert( 0, 0, @@ -2784,7 +2798,7 @@ pub mod tests { &mut gc, UPSERT_RECLAIM_TEST_DEFAULT, ); - assert_eq!(gc, vec![(0, 1)]); + assert_eq!(gc, ReclaimsSlotList::from([(0, 1)])); index .get_with_and_then( &key, @@ -2805,7 +2819,7 @@ pub mod tests { let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); let ancestors = vec![(0, 0)].into_iter().collect(); - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); index.upsert( 0, 0, @@ -2814,7 +2828,7 @@ pub mod tests { &AccountSecondaryIndexes::default(), true, &mut gc, - UPSERT_RECLAIM_TEST_DEFAULT, + UpsertReclaim::PopulateReclaims, ); assert!(gc.is_empty()); index.upsert( @@ -2825,7 +2839,7 @@ pub mod tests { &AccountSecondaryIndexes::default(), false, &mut gc, - UPSERT_RECLAIM_TEST_DEFAULT, + UpsertReclaim::PopulateReclaims, ); assert!(gc.is_empty()); index @@ -2859,7 +2873,7 @@ pub mod tests { fn test_update_gc_purged_slot() { let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); index.upsert( 0, 0, @@ -2868,7 +2882,7 @@ pub mod tests { &AccountSecondaryIndexes::default(), true, &mut gc, - UPSERT_RECLAIM_TEST_DEFAULT, + UpsertReclaim::PopulateReclaims, ); assert!(gc.is_empty()); index.upsert( @@ -2879,7 +2893,7 @@ pub mod tests { &AccountSecondaryIndexes::default(), false, &mut gc, - UPSERT_RECLAIM_TEST_DEFAULT, + UpsertReclaim::PopulateReclaims, ); index.upsert( 2, @@ -2889,7 +2903,7 @@ pub mod tests { &AccountSecondaryIndexes::default(), true, &mut gc, - UPSERT_RECLAIM_TEST_DEFAULT, + UpsertReclaim::PopulateReclaims, ); index.upsert( 3, @@ -2899,7 +2913,7 @@ pub mod tests { &AccountSecondaryIndexes::default(), true, &mut gc, - UPSERT_RECLAIM_TEST_DEFAULT, + UpsertReclaim::PopulateReclaims, ); index.add_root(0); index.add_root(1); @@ -2912,12 +2926,12 @@ pub mod tests { &AccountSecondaryIndexes::default(), true, &mut gc, - 
UPSERT_RECLAIM_TEST_DEFAULT, + UpsertReclaim::PopulateReclaims, ); // Updating index should not purge older roots, only purges // previous updates within the same slot - assert_eq!(gc, vec![]); + assert_eq!(gc, ReclaimsSlotList::new()); index .get_with_and_then(&key, None, None, false, |(slot, account_info)| { assert_eq!(slot, 3); @@ -2927,22 +2941,84 @@ pub mod tests { let mut num = 0; let mut found_key = false; - index.unchecked_scan_accounts( - "", - &Ancestors::default(), - |pubkey, index| { - if pubkey == &key { - found_key = true; - assert_eq!(index, (&true, 3)); - }; - num += 1 - }, - &ScanConfig::default(), - ); + index + .scan_accounts( + &Ancestors::default(), + 0, + |pubkey, index| { + if pubkey == &key { + found_key = true; + assert_eq!(index, (&true, 3)); + }; + num += 1 + }, + &ScanConfig::default(), + ) + .expect("scan should succeed"); assert_eq!(num, 1); assert!(found_key); } + #[test] + fn test_upsert_reclaims() { + let key = solana_pubkey::new_rand(); + let index = + AccountsIndex::::default_for_tests(); + let mut reclaims = ReclaimsSlotList::new(); + index.upsert( + 0, + 0, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + CacheableIndexValueTest(true), + &mut reclaims, + UPSERT_RECLAIM_TEST_DEFAULT, + ); + // No reclaims should be returned on the first item + assert!(reclaims.is_empty()); + + index.upsert( + 0, + 0, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + CacheableIndexValueTest(false), + &mut reclaims, + UPSERT_RECLAIM_TEST_DEFAULT, + ); + // Cached item should not be reclaimed + assert!(reclaims.is_empty()); + + // Slot list should only have a single entry + // Using brackets to limit scope of read lock + { + let entry = index.get_cloned(&key).unwrap(); + let slot_list = entry.slot_list.read().unwrap(); + assert_eq!(slot_list.len(), 1); + } + + index.upsert( + 0, + 0, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + 
CacheableIndexValueTest(false), + &mut reclaims, + UPSERT_RECLAIM_TEST_DEFAULT, + ); + + // Uncached item should be returned as reclaim + assert!(!reclaims.is_empty()); + + // Slot list should only have a single entry + let entry = index.get_cloned(&key).unwrap(); + let slot_list = entry.slot_list.read().unwrap(); + assert_eq!(slot_list.len(), 1); + } + fn account_maps_stats_len(index: &AccountsIndex) -> usize { index.storage.storage.stats.total_count() } @@ -2951,7 +3027,7 @@ pub mod tests { fn test_purge() { let key = solana_pubkey::new_rand(); let index = AccountsIndex::::default_for_tests(); - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); assert_eq!(0, account_maps_stats_len(&index)); index.upsert( 1, @@ -2978,11 +3054,11 @@ pub mod tests { assert_eq!(1, account_maps_stats_len(&index)); let purges = index.purge_roots(&key); - assert_eq!(purges, (vec![], false)); + assert_eq!(purges, (SlotList::new(), false)); index.add_root(1); let purges = index.purge_roots(&key); - assert_eq!(purges, (vec![(1, 10)], true)); + assert_eq!(purges, (SlotList::from([(1, 10)]), true)); assert_eq!(1, account_maps_stats_len(&index)); index.upsert( @@ -3081,7 +3157,7 @@ pub mod tests { ), secondary_indexes, true, - &mut vec![], + &mut ReclaimsSlotList::new(), UPSERT_RECLAIM_TEST_DEFAULT, ); } @@ -3105,15 +3181,191 @@ pub mod tests { index.purge_exact( &account_key, - &slots.into_iter().collect::>(), - &mut vec![], + slots.into_iter().collect::>(), + &mut ReclaimsSlotList::new(), ); - let _ = index.handle_dead_keys(&[&account_key], secondary_indexes); + let _ = index.handle_dead_keys(&[account_key], secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } + #[test] + fn test_reclaim_older_items_in_slot_list() { + solana_logger::setup(); + let key = solana_pubkey::new_rand(); + let index = AccountsIndex::::default_for_tests(); + let mut gc = ReclaimsSlotList::new(); + let reclaim_slot = 5; + let account_value 
= 50; + + // Insert multiple older items into the slot list + for slot in 0..reclaim_slot { + index.upsert( + slot, + slot, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + slot, + &mut gc, + UpsertReclaim::IgnoreReclaims, + ); + } + let entry = index.get_cloned(&key).unwrap(); + assert_eq!(entry.slot_list.read().unwrap().len(), reclaim_slot as usize); + + // Insert an item newer than the one that we will reclaim old slots on + index.upsert( + reclaim_slot + 1, + reclaim_slot + 1, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + account_value + 1, + &mut gc, + UpsertReclaim::IgnoreReclaims, + ); + let entry = index.get_cloned(&key).unwrap(); + assert_eq!( + entry.slot_list.read().unwrap().len(), + (reclaim_slot + 1) as usize + ); + + // Reclaim all older slots + index.upsert( + reclaim_slot, + reclaim_slot, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + account_value, + &mut gc, + UpsertReclaim::ReclaimOldSlots, + ); + + // Verify that older items are reclaimed + assert_eq!(gc.len(), reclaim_slot as usize); + for (slot, value) in gc.iter() { + assert!(*slot < reclaim_slot); + assert_eq!(*value, *slot); + } + + // Verify that the item added is in in the slot list + let ancestors = vec![(reclaim_slot, 0)].into_iter().collect(); + index + .get_with_and_then( + &key, + Some(&ancestors), + None, + false, + |(slot, account_info)| { + assert_eq!(slot, reclaim_slot); + assert_eq!(account_info, account_value); + }, + ) + .unwrap(); + + // Verify that the newer item remains in the slot list + let ancestors = vec![((reclaim_slot + 1), 0)].into_iter().collect(); + index + .get_with_and_then( + &key, + Some(&ancestors), + None, + false, + |(slot, account_info)| { + assert_eq!(slot, reclaim_slot + 1); + assert_eq!(account_info, account_value + 1); + }, + ) + .unwrap(); + } + + #[test] + fn test_reclaim_do_not_reclaim_cached_other_slot() { + solana_logger::setup(); + let key = 
solana_pubkey::new_rand(); + let index = + AccountsIndex::::default_for_tests(); + let mut gc = ReclaimsSlotList::new(); + + // Insert an uncached account at slot 0 and an cached account at slot 1 + index.upsert( + 0, + 0, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + CacheableIndexValueTest(false), + &mut gc, + UpsertReclaim::IgnoreReclaims, + ); + + index.upsert( + 1, + 1, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + CacheableIndexValueTest(true), + &mut gc, + UpsertReclaim::IgnoreReclaims, + ); + + // Now insert a cached account at slot 2 + index.upsert( + 2, + 2, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + CacheableIndexValueTest(true), + &mut gc, + UpsertReclaim::IgnoreReclaims, + ); + + // Replace the cached account at slot 2 with a uncached account + index.upsert( + 2, + 2, + &key, + &AccountSharedData::default(), + &AccountSecondaryIndexes::default(), + CacheableIndexValueTest(false), + &mut gc, + UpsertReclaim::ReclaimOldSlots, + ); + + // Verify that the slot list is length two and consists of the cached account at slot 1 + // and the uncached account at slot 2 + let entry = index.get_cloned(&key).unwrap(); + assert_eq!(entry.slot_list.read().unwrap().len(), 2); + assert_eq!( + entry.slot_list.read().unwrap()[0], + PreAllocatedAccountMapEntry::new( + 1, + CacheableIndexValueTest(true), + &index.storage.storage, + false + ) + .into() + ); + assert_eq!( + entry.slot_list.read().unwrap()[1], + PreAllocatedAccountMapEntry::new( + 2, + CacheableIndexValueTest(false), + &index.storage.storage, + false + ) + .into() + ); + // Verify that the uncached account at slot 0 was reclaimed + assert_eq!(gc.len(), 1); + assert_eq!(gc[0], (0, CacheableIndexValueTest(false))); + } + #[test] fn test_purge_exact_spl_token_mint_secondary_index() { let (key_start, key_end, secondary_indexes) = create_spl_token_mint_secondary_index_state(); @@ -3145,68 +3397,77 @@ pub 
mod tests { fn test_purge_older_root_entries() { // No roots, should be no reclaims let index = AccountsIndex::::default_for_tests(); - let mut slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; - let mut reclaims = vec![]; + let mut slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]); + let mut reclaims = ReclaimsSlotList::new(); index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert!(reclaims.is_empty()); - assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); + assert_eq!( + slot_list, + SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]) + ); // Add a later root, earlier slots should be reclaimed - slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; + slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]); index.add_root(1); // Note 2 is not a root index.add_root(5); - reclaims = vec![]; + reclaims = ReclaimsSlotList::new(); index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); - assert_eq!(reclaims, vec![(1, true), (2, true)]); - assert_eq!(slot_list, vec![(5, true), (9, true)]); + assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)])); + assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)])); // Add a later root that is not in the list, should not affect the outcome - slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; + slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]); index.add_root(6); - reclaims = vec![]; + reclaims = ReclaimsSlotList::new(); index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); - assert_eq!(reclaims, vec![(1, true), (2, true)]); - assert_eq!(slot_list, vec![(5, true), (9, true)]); + assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)])); + assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)])); // Pass a max root >= than any root in the slot list, should not affect // outcome - slot_list = 
vec![(1, true), (2, true), (5, true), (9, true)]; - reclaims = vec![]; + slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]); + reclaims = ReclaimsSlotList::new(); index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(6)); - assert_eq!(reclaims, vec![(1, true), (2, true)]); - assert_eq!(slot_list, vec![(5, true), (9, true)]); + assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)])); + assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)])); // Pass a max root, earlier slots should be reclaimed - slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; - reclaims = vec![]; + slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]); + reclaims = ReclaimsSlotList::new(); index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(5)); - assert_eq!(reclaims, vec![(1, true), (2, true)]); - assert_eq!(slot_list, vec![(5, true), (9, true)]); + assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)])); + assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)])); // Pass a max root 2. This means the latest root < 2 is 1 because 2 is not a root // so nothing will be purged - slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; - reclaims = vec![]; + slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]); + reclaims = ReclaimsSlotList::new(); index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(2)); assert!(reclaims.is_empty()); - assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); + assert_eq!( + slot_list, + SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]) + ); // Pass a max root 1. 
This means the latest root < 3 is 1 because 2 is not a root // so nothing will be purged - slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; - reclaims = vec![]; + slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]); + reclaims = ReclaimsSlotList::new(); index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(1)); assert!(reclaims.is_empty()); - assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); + assert_eq!( + slot_list, + SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]) + ); // Pass a max root that doesn't exist in the list but is greater than // some of the roots in the list, shouldn't return those smaller roots - slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; - reclaims = vec![]; + slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]); + reclaims = ReclaimsSlotList::new(); index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(7)); - assert_eq!(reclaims, vec![(1, true), (2, true)]); - assert_eq!(slot_list, vec![(5, true), (9, true)]); + assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)])); + assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)])); } fn check_secondary_index_mapping_correct( @@ -3256,7 +3517,7 @@ pub mod tests { &AccountSharedData::create(0, account_data.to_vec(), Pubkey::default(), false, 0), &secondary_indexes, true, - &mut vec![], + &mut ReclaimsSlotList::new(), UPSERT_RECLAIM_TEST_DEFAULT, ); assert!(secondary_index.index.is_empty()); @@ -3270,7 +3531,7 @@ pub mod tests { &AccountSharedData::create(0, account_data[1..].to_vec(), *token_id, false, 0), &secondary_indexes, true, - &mut vec![], + &mut ReclaimsSlotList::new(), UPSERT_RECLAIM_TEST_DEFAULT, ); assert!(secondary_index.index.is_empty()); @@ -3328,7 +3589,7 @@ pub mod tests { index.slot_list_mut(&account_key, |slot_list| slot_list.clear()); // Everything should be deleted - let _ = index.handle_dead_keys(&[&account_key], 
&secondary_indexes); + let _ = index.handle_dead_keys(&[account_key], &secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } @@ -3395,7 +3656,7 @@ pub mod tests { &AccountSharedData::create(0, account_data1.to_vec(), *token_id, false, 0), secondary_indexes, true, - &mut vec![], + &mut ReclaimsSlotList::new(), UPSERT_RECLAIM_TEST_DEFAULT, ); @@ -3407,7 +3668,7 @@ pub mod tests { &AccountSharedData::create(0, account_data2.to_vec(), *token_id, false, 0), secondary_indexes, true, - &mut vec![], + &mut ReclaimsSlotList::new(), UPSERT_RECLAIM_TEST_DEFAULT, ); @@ -3427,7 +3688,7 @@ pub mod tests { &AccountSharedData::create(0, account_data1.to_vec(), *token_id, false, 0), secondary_indexes, true, - &mut vec![], + &mut ReclaimsSlotList::new(), UPSERT_RECLAIM_TEST_DEFAULT, ); assert_eq!(secondary_index.get(&secondary_key1), vec![account_key]); @@ -3437,7 +3698,7 @@ pub mod tests { // so both secondary keys will still be kept alive. index.add_root(later_slot); index.slot_list_mut(&account_key, |slot_list| { - index.purge_older_root_entries(slot_list, &mut vec![], None) + index.purge_older_root_entries(slot_list, &mut ReclaimsSlotList::new(), None) }); check_secondary_index_mapping_correct( @@ -3448,9 +3709,9 @@ pub mod tests { // Removing the remaining entry for this pubkey in the index should mark the // pubkey as dead and finally remove all the secondary indexes - let mut reclaims = vec![]; - index.purge_exact(&account_key, &later_slot, &mut reclaims); - let _ = index.handle_dead_keys(&[&account_key], secondary_indexes); + let mut reclaims = ReclaimsSlotList::new(); + index.purge_exact(&account_key, later_slot, &mut reclaims); + let _ = index.handle_dead_keys(&[account_key], secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } @@ -3513,6 +3774,24 @@ pub mod tests { } } + /// Type that supports caching for tests. 
Used to test upsert behaviour + /// when the slot list has mixed cached and uncached items. + #[derive(Default, Debug, Clone, Copy, PartialEq, Eq)] + struct CacheableIndexValueTest(bool); + impl IndexValue for CacheableIndexValueTest {} + impl DiskIndexValue for CacheableIndexValueTest {} + impl IsCached for CacheableIndexValueTest { + fn is_cached(&self) -> bool { + // Return self value as whether the item is cached or not + self.0 + } + } + impl IsZeroLamport for CacheableIndexValueTest { + fn is_zero_lamport(&self) -> bool { + false + } + } + #[test] fn test_get_newest_root_in_slot_list() { let index = AccountsIndex::::default_for_tests(); @@ -3586,7 +3865,16 @@ pub mod tests { impl AccountsIndex { fn upsert_simple_test(&self, key: &Pubkey, slot: Slot, value: T) { - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); + + // It is invalid to reclaim older slots if the slot being upserted + // is unrooted + let reclaim_method = if self.is_alive_root(slot) { + UPSERT_RECLAIM_TEST_DEFAULT + } else { + UpsertReclaim::IgnoreReclaims + }; + self.upsert( slot, slot, @@ -3595,7 +3883,7 @@ pub mod tests { &AccountSecondaryIndexes::default(), value, &mut gc, - UPSERT_RECLAIM_TEST_DEFAULT, + reclaim_method, ); assert!(gc.is_empty()); } @@ -3651,7 +3939,7 @@ pub mod tests { let index = AccountsIndex::::default_for_tests(); let slot1 = 1; - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); // return true if we don't know anything about 'key_unknown' // the item did not exist in the accounts index at all, so index is up to date assert!(index.clean_rooted_entries(&key_unknown, &mut gc, None)); @@ -3666,9 +3954,9 @@ pub mod tests { // this will delete the entry because it is <= max_root_inclusive and NOT a root // note this has to be slot2 because of inclusive vs exclusive in the call to can_purge_older_entries { - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); assert!(index.clean_rooted_entries(&key, &mut gc, Some(slot2))); - 
assert_eq!(gc, vec![(slot1, value)]); + assert_eq!(gc, ReclaimsSlotList::from([(slot1, value)])); } // re-add it @@ -3699,7 +3987,7 @@ pub mod tests { { { let roots_tracker = &index.roots_tracker.read().unwrap(); - let slot_list = vec![(slot2, value)]; + let slot_list = SlotList::from([(slot2, value)]); assert_eq!( 0, AccountsIndex::::get_newest_root_in_slot_list( @@ -3712,7 +4000,7 @@ pub mod tests { index.add_root(slot2); { let roots_tracker = &index.roots_tracker.read().unwrap(); - let slot_list = vec![(slot2, value)]; + let slot_list = SlotList::from([(slot2, value)]); assert_eq!( slot2, AccountsIndex::::get_newest_root_in_slot_list( @@ -3734,12 +4022,12 @@ pub mod tests { assert!(gc.is_empty()); assert!(!index.clean_rooted_entries(&key, &mut gc, Some(slot2))); - assert_eq!(gc, vec![(slot1, value)]); + assert_eq!(gc, ReclaimsSlotList::from([(slot1, value)])); gc.clear(); index.clean_dead_slot(slot2); let slot3 = 3; assert!(index.clean_rooted_entries(&key, &mut gc, Some(slot3))); - assert_eq!(gc, vec![(slot2, value)]); + assert_eq!(gc, ReclaimsSlotList::from([(slot2, value)])); } #[test] @@ -3748,7 +4036,7 @@ pub mod tests { let index = AccountsIndex::::default_for_tests(); assert_eq!( - index.handle_dead_keys(&[&key], &AccountSecondaryIndexes::default()), + index.handle_dead_keys(&[key], &AccountSecondaryIndexes::default()), vec![key].into_iter().collect::>() ); } diff --git a/accounts-db/src/accounts_index/account_map_entry.rs b/accounts-db/src/accounts_index/account_map_entry.rs index 85b2a984286afa..edad7b78456cec 100644 --- a/accounts-db/src/accounts_index/account_map_entry.rs +++ b/accounts-db/src/accounts_index/account_map_entry.rs @@ -13,7 +13,7 @@ use { /// one entry in the in-mem accounts index /// Represents the value for an account key in the in-memory accounts index -#[derive(Debug, Default)] +#[derive(Debug)] pub struct AccountMapEntry { /// number of alive slots that contain >= 1 instances of account data for this pubkey /// where alive represents 
a slot that has not yet been removed by clean via AccountsDB::clean_stored_dead_slots() for containing no up to date account information @@ -34,12 +34,24 @@ impl AccountMapEntry { meta, } } + + #[cfg(test)] + pub(super) fn empty_for_tests() -> Self { + Self { + slot_list: RwLock::default(), + ref_count: AtomicRefCount::default(), + meta: AccountMapEntryMeta::default(), + } + } + pub fn ref_count(&self) -> RefCount { self.ref_count.load(Ordering::Acquire) } pub fn addref(&self) { - self.ref_count.fetch_add(1, Ordering::Release); + let previous = self.ref_count.fetch_add(1, Ordering::Release); + // ensure ref count does not overflow + assert_ne!(previous, RefCount::MAX); self.set_dirty(true); } @@ -52,7 +64,7 @@ impl AccountMapEntry { /// decrement the ref count by the passed in amount /// return the refcount prior to the ref count change - pub fn unref_by_count(&self, count: u64) -> RefCount { + pub fn unref_by_count(&self, count: RefCount) -> RefCount { let previous = self.ref_count.fetch_sub(count, Ordering::Release); self.set_dirty(true); assert!( @@ -109,7 +121,7 @@ pub struct AccountMapEntryMeta { impl AccountMapEntryMeta { pub fn new_dirty + Into>( - storage: &Arc>, + storage: &BucketMapHolder, is_cached: bool, ) -> Self { AccountMapEntryMeta { @@ -118,7 +130,7 @@ impl AccountMapEntryMeta { } } pub fn new_clean + Into>( - storage: &Arc>, + storage: &BucketMapHolder, ) -> Self { AccountMapEntryMeta { dirty: AtomicBool::new(false), @@ -162,7 +174,7 @@ impl PreAllocatedAccountMapEntry { pub fn new + Into>( slot: Slot, account_info: T, - storage: &Arc>, + storage: &BucketMapHolder, store_raw: bool, ) -> PreAllocatedAccountMapEntry { if store_raw { @@ -175,13 +187,13 @@ impl PreAllocatedAccountMapEntry { fn allocate + Into>( slot: Slot, account_info: T, - storage: &Arc>, + storage: &BucketMapHolder, ) -> Arc> { let is_cached = account_info.is_cached(); let ref_count = RefCount::from(!is_cached); let meta = AccountMapEntryMeta::new_dirty(storage, is_cached); 
Arc::new(AccountMapEntry::new( - vec![(slot, account_info)], + SlotList::from([(slot, account_info)]), ref_count, meta, )) @@ -189,7 +201,7 @@ impl PreAllocatedAccountMapEntry { pub fn into_account_map_entry + Into>( self, - storage: &Arc>, + storage: &BucketMapHolder, ) -> Arc> { match self { Self::Entry(entry) => entry, diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index 146546b5b54e96..b0edb06340740d 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -4,7 +4,7 @@ use { account_map_entry::{ AccountMapEntry, AccountMapEntryMeta, PreAllocatedAccountMapEntry, }, - DiskIndexValue, IndexValue, RefCount, SlotList, UpsertReclaim, + DiskIndexValue, IndexValue, ReclaimsSlotList, RefCount, SlotList, UpsertReclaim, }, bucket_map_holder::{Age, AtomicAge, BucketMapHolder}, bucket_map_holder_stats::BucketMapHolderStats, @@ -16,6 +16,7 @@ use { solana_measure::measure::Measure, solana_pubkey::Pubkey, std::{ + cmp, collections::{hash_map::Entry, HashMap, HashSet}, fmt::Debug, sync::{ @@ -271,7 +272,10 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex callback(Some(occupied.get())).1, Entry::Vacant(vacant) => { debug_assert!(!disk_entry.dirty()); @@ -398,7 +403,11 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex bool { let mut m = Measure::start("entry"); let mut map = self.map_internal.write().unwrap(); + let capacity_pre = map.capacity(); let entry = map.entry(pubkey); m.stop(); let found = matches!(entry, Entry::Occupied(_)); let result = self.remove_if_slot_list_empty_entry(entry); + let capacity_post = map.capacity(); drop(map); - + self.stats() + .update_in_mem_capacity(capacity_pre, capacity_post); self.update_entry_stats(m, found); result } @@ -499,35 +511,18 @@ impl + Into> InMemAccountsIndex, - new_value: (Slot, T), - other_slot: Option, - reclaims: &mut SlotList, - reclaim: 
UpsertReclaim, - ) { - let mut upsert_cached = new_value.1.is_cached(); - if Self::lock_and_update_slot_list(entry, new_value, other_slot, reclaims, reclaim) > 1 { - // if slot list > 1, then we are going to hold this entry in memory until it gets set back to 1 - upsert_cached = true; - } - self.set_age_to_future(entry, upsert_cached); - } - /// Insert a cached entry into the accounts index /// If the entry is already present, just mark dirty and set the age to the future - fn cache_entry_at_slot(&self, entry: &AccountMapEntry, slot: Slot, account_info: T) { - let mut slot_list = entry.slot_list.write().unwrap(); + fn cache_entry_at_slot(current: &AccountMapEntry, new_value: (Slot, T)) { + let mut slot_list = current.slot_list.write().unwrap(); + let (slot, new_entry) = new_value; if !slot_list .iter() .any(|(existing_slot, _)| *existing_slot == slot) { - slot_list.push((slot, account_info)); + slot_list.push((slot, new_entry)); } - entry.set_dirty(true); - self.set_age_to_future(entry, true); + current.set_dirty(true); } pub fn upsert( @@ -535,7 +530,7 @@ impl + Into> InMemAccountsIndex, other_slot: Option, - reclaims: &mut SlotList, + reclaims: &mut ReclaimsSlotList, reclaim: UpsertReclaim, ) { let (slot, account_info) = new_value.into(); @@ -543,15 +538,17 @@ impl + Into> InMemAccountsIndex 1); } }); } @@ -570,8 +567,10 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex + Into> InMemAccountsIndex + Into> InMemAccountsIndex, new_value: (Slot, T), other_slot: Option, - reclaims: &mut SlotList, + reclaims: &mut ReclaimsSlotList, reclaim: UpsertReclaim, ) -> usize { let mut slot_list = current.slot_list.write().unwrap(); let (slot, new_entry) = new_value; - let addref = Self::update_slot_list( + let ref_count_change = Self::update_slot_list( &mut slot_list, slot, new_entry, @@ -654,31 +656,46 @@ impl + Into> InMemAccountsIndex { + // Do nothing + } + cmp::Ordering::Greater => { + // If the ref count change is positive, it must be 1 as only one entry is 
being added + assert_eq!(ref_count_change, 1); + current.addref(); + } + cmp::Ordering::Less => { + current.unref_by_count(ref_count_change.unsigned_abs()); + } } current.set_dirty(true); slot_list.len() } - /// modifies slot_list - /// any entry at 'slot' or slot 'other_slot' is replaced with 'account_info'. - /// or, 'account_info' is appended to the slot list if the slot did not exist previously. - /// returns true if caller should addref - /// conditions when caller should addref: - /// 'account_info' does NOT represent a cached storage (the slot is being flushed from the cache) - /// AND - /// previous slot_list entry AT 'slot' did not exist (this is the first time this account was modified in this "slot"), or was previously cached (the storage is now being flushed from the cache) - /// Note that even if entry DID exist at 'other_slot', the above conditions apply. + /// Modifies the slot_list by replacing or appending entries. + /// + /// - Replaces any entry at `slot` or `other_slot` with `account_info`. + /// - Appends `account_info` to the slot list if `slot` did not exist previously. + /// - If UpsertReclaim is ReclaimOldSlots, remove all uncached entries older than `slot` + /// and add them to reclaims + /// + /// Returns the reference count change as an `i32`. 
The reference count change + /// is the number of entries added (1) - the number of uncached entries removed + /// or replaced fn update_slot_list( slot_list: &mut SlotList, slot: Slot, account_info: T, other_slot: Option, - reclaims: &mut SlotList, + reclaims: &mut ReclaimsSlotList, reclaim: UpsertReclaim, - ) -> bool { - let mut addref = !account_info.is_cached(); + ) -> i32 { + let mut ref_count_change = 1; + + // Cached accounts are not expected by this function, use cache_entry_at_slot instead + assert!(!account_info.is_cached()); let old_slot = other_slot.unwrap_or(slot); @@ -697,7 +714,7 @@ impl + Into> InMemAccountsIndex { + UpsertReclaim::ReclaimOldSlots | UpsertReclaim::PopulateReclaims => { // Reclaims are used to reclaim other versions of accounts when they are // rewritten elsewhere. Cached accounts are not in storage, so there is // no reason to store the reclaim. @@ -716,10 +733,16 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex + Into> InMemAccountsIndex::lock_and_update_slot_list( + let updated_slot_list_len = Self::lock_and_update_slot_list( occupied.get(), (slot, account_info), None, // should be None because we don't expect a different slot # during index generation - &mut Vec::default(), + &mut ReclaimsSlotList::new(), UpsertReclaim::IgnoreReclaims, ); @@ -847,7 +870,7 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex usize { - size_of::<(Slot, T)>() + size_of::() + size_of::>() + // with only one entry in the slot list, it is stored inline in the SmallVec + size_of::() + size_of::>() } fn should_evict_based_on_age( @@ -1059,10 +1083,11 @@ impl + Into> InMemAccountsIndex { num_did_not_exist += 1; @@ -1233,7 +1258,7 @@ impl + Into> InMemAccountsIndex>(), - ref_count, + ref_count.into(), // ref count on disk is u64 ), ) }; @@ -1305,12 +1330,14 @@ impl + Into> InMemAccountsIndex + Into> InMemAccountsIndex &BucketMapHolderStats { @@ -1366,6 +1396,13 @@ impl + Into> InMemAccountsIndex usize { + 
self.map_internal.read().unwrap().capacity() + } } /// An RAII implementation of a scoped lock for the `flushing_active` atomic flag in @@ -1401,7 +1438,7 @@ impl Drop for FlushGuard<'_> { mod tests { use { super::*, - crate::accounts_index::{AccountsIndexConfig, BINS_FOR_TESTING}, + crate::accounts_index::{AccountsIndexConfig, IndexLimitMb, BINS_FOR_TESTING}, assert_matches::assert_matches, itertools::Itertools, test_case::test_case, @@ -1418,11 +1455,11 @@ mod tests { } fn new_disk_buckets_for_test() -> InMemAccountsIndex { - let holder = Arc::new(BucketMapHolder::new( - BINS_FOR_TESTING, - &AccountsIndexConfig::default(), - 1, - )); + let config = AccountsIndexConfig { + index_limit_mb: IndexLimitMb::Minimal, + ..Default::default() + }; + let holder = Arc::new(BucketMapHolder::new(BINS_FOR_TESTING, &config, 1)); let bin = 0; let bucket = InMemAccountsIndex::new(&holder, bin); assert!(bucket.storage.is_disk_index_enabled()); @@ -1440,7 +1477,7 @@ mod tests { assert!(entry.slot_list.read().unwrap().is_empty()); assert_eq!(entry.ref_count(), 0); assert!(entry.dirty()); - accounts_index.cache_entry_at_slot(entry, slot, 0); + InMemAccountsIndex::::cache_entry_at_slot(entry, (slot, 0)); callback_called = true; }); @@ -1461,7 +1498,7 @@ mod tests { // Insert an entry manually let entry = Arc::new(AccountMapEntry::new( - vec![(0, 42)], + SlotList::from([(0, 42)]), 1, AccountMapEntryMeta::new_dirty(&accounts_index.storage, true), )); @@ -1509,7 +1546,7 @@ mod tests { assert_eq!(entry.slot_list.read().unwrap().len(), 1); assert_eq!(entry.ref_count(), 1); assert!(!entry.dirty()); // Entry loaded from disk should not be dirty - accounts_index.cache_entry_at_slot(entry, slot, 0); + InMemAccountsIndex::::cache_entry_at_slot(entry, (slot, 0)); callback_called = true; }); @@ -1536,13 +1573,180 @@ mod tests { }); } + #[test] + fn test_update_slot_list_other_populate_reclaims() { + solana_logger::setup(); + let reclaim = UpsertReclaim::PopulateReclaims; + let new_slot = 5; + let 
info = 1; + let other_value = info + 1; + let at_new_slot = (new_slot, info); + let unique_other_slot = new_slot + 1; + for other_slot in [Some(new_slot), Some(unique_other_slot), None] { + let mut reclaims = ReclaimsSlotList::new(); + let mut slot_list = SlotList::new(); + // upserting into empty slot_list, so always addref + assert_eq!( + InMemAccountsIndex::::update_slot_list( + &mut slot_list, + new_slot, + info, + other_slot, + &mut reclaims, + reclaim + ), + 1, + "other_slot: {other_slot:?}" + ); + assert_eq!(slot_list, SlotList::from([at_new_slot])); + assert!(reclaims.is_empty()); + } + + // replace other + let mut slot_list = SlotList::from([(unique_other_slot, other_value)]); + let expected_reclaims = ReclaimsSlotList::from(slot_list.as_slice()); + let other_slot = Some(unique_other_slot); + let mut reclaims = ReclaimsSlotList::new(); + assert_eq!( + // upserting into slot_list that does NOT contain an entry at 'new_slot' + // but, it DOES contain an entry at other_slot, so we do NOT add-ref. 
The assumption is that 'other_slot' is going away + // and that the previously held add-ref is now used by 'new_slot' + InMemAccountsIndex::::update_slot_list( + &mut slot_list, + new_slot, + info, + other_slot, + &mut reclaims, + reclaim + ), + 0, + "other_slot: {other_slot:?}" + ); + assert_eq!(slot_list, SlotList::from([at_new_slot])); + assert_eq!(reclaims, expected_reclaims); + + // nothing will exist at this slot + let missing_other_slot = unique_other_slot + 1; + let ignored_slot = 10; // bigger than is used elsewhere in the test + let ignored_value = info + 10; + + // build a list of possible contents in the slot_list prior to calling 'update_slot_list' + let possible_initial_slot_list_contents = { + let mut possible_initial_slot_list_contents = Vec::new(); + + // Add ignored slot account_info entries (slots with larger slot #s than 'new_slot' or 'other_slot') + possible_initial_slot_list_contents + .extend((0..3).map(|i| (ignored_slot + i, ignored_value + i))); + + // Add account_info for 'new_slot' + possible_initial_slot_list_contents.push(at_new_slot); + // Add account_info for 'other_slot' + possible_initial_slot_list_contents.push((unique_other_slot, other_value)); + possible_initial_slot_list_contents + }; + + /* + * loop over all possible permutations of 'possible_initial_slot_list_contents' + * some examples: + * [] + * [other] + * [other, new_slot] + * [new_slot, other] + * [dummy0, new_slot, dummy1, other] (and all permutation of this order) + * [other, dummy1, new_slot] (and all permutation of this order) + * ... 
+ * [dummy0, new_slot, dummy1, other_slot, dummy2] (and all permutation of this order) + */ + let mut attempts = 0; + // loop over each initial size of 'slot_list' + for initial_slot_list_len in 0..=possible_initial_slot_list_contents.len() { + // loop over every permutation of possible_initial_slot_list_contents within a list of len 'initial_slot_list_len' + for content_source_indexes in + (0..possible_initial_slot_list_contents.len()).permutations(initial_slot_list_len) + { + // loop over each possible parameter for 'other_slot' + for other_slot in [ + Some(new_slot), + Some(unique_other_slot), + Some(missing_other_slot), + None, + ] { + if other_slot.is_some() + && new_slot != other_slot.unwrap() + && slot_list.contains(&(new_slot, info)) + { + // skip this permutation if 'new_slot' is already in the slot_list, but we are trying to reclaim other slot + // This is an assert case as only one of new_slot and other_slot should be in the slot list + continue; + } + + attempts += 1; + // initialize slot_list prior to call to 'InMemAccountsIndex::update_slot_list' + // by inserting each possible entry at each possible position + let mut slot_list = content_source_indexes + .iter() + .map(|i| possible_initial_slot_list_contents[*i]) + .collect::>(); + let mut expected = slot_list.clone(); + let original = slot_list.clone(); + let mut reclaims = ReclaimsSlotList::new(); + + let result = InMemAccountsIndex::::update_slot_list( + &mut slot_list, + new_slot, + info, + other_slot, + &mut reclaims, + reclaim, + ); + + // calculate expected reclaims + let mut expected_reclaims = ReclaimsSlotList::new(); + expected.retain(|(slot, info)| { + let retain = slot != &new_slot && Some(*slot) != other_slot; + if !retain { + expected_reclaims.push((*slot, *info)); + } + retain + }); + expected.push((new_slot, info)); + + // Calculate the expected ref count change. 
It is expected to be 1 - the number of reclaims + let expected_result = 1 - expected_reclaims.len() as i32; + assert_eq!( + expected_result, result, + "return value different. other: {other_slot:?}, {expected:?}, \ + {slot_list:?}, original: {original:?}" + ); + // sort for easy comparison + expected_reclaims.sort_unstable(); + reclaims.sort_unstable(); + assert_eq!( + expected_reclaims, reclaims, + "reclaims different. other: {other_slot:?}, {expected:?}, {slot_list:?}, \ + original: {original:?}" + ); + // sort for easy comparison + slot_list.sort_unstable(); + expected.sort_unstable(); + assert_eq!( + slot_list, expected, + "slot_list different. other: {other_slot:?}, {expected:?}, {slot_list:?}, \ + original: {original:?}" + ); + } + } + } + assert_eq!(attempts, 652); // complicated permutations, so make sure we ran the right # + } + #[test] fn test_should_evict_from_mem_ref_count() { for ref_count in [0, 1, 2] { let bucket = new_for_test::(); let startup = false; let current_age = 0; - let one_element_slot_list = vec![(0, 0)]; + let one_element_slot_list = SlotList::from([(0, 0)]); let one_element_slot_list_entry = Arc::new(AccountMapEntry::new( one_element_slot_list, ref_count, @@ -1575,7 +1779,7 @@ mod tests { .collect::>(); let accounts = (0..=255) .map(|age| { - let one_element_slot_list = vec![(0, 0)]; + let one_element_slot_list = SlotList::from([(0, 0)]); let one_element_slot_list_entry = Arc::new(AccountMapEntry::new( one_element_slot_list, ref_count, @@ -1629,7 +1833,7 @@ mod tests { let mut startup = false; let mut current_age = 0; let ref_count = 1; - let one_element_slot_list = vec![(0, 0)]; + let one_element_slot_list = SlotList::from([(0, 0)]); let one_element_slot_list_entry = Arc::new(AccountMapEntry::new( one_element_slot_list, ref_count, @@ -1642,7 +1846,7 @@ mod tests { .should_evict_from_mem( current_age, &Arc::new(AccountMapEntry::new( - vec![], + SlotList::new(), ref_count, AccountMapEntryMeta::default() )), @@ -1670,7 +1874,7 @@ mod 
tests { .should_evict_from_mem( current_age, &Arc::new(AccountMapEntry::new( - vec![(0, 0), (1, 1)], + SlotList::from_iter([(0, 0u64), (1, 1)]), ref_count, AccountMapEntryMeta::default() )), @@ -1689,7 +1893,7 @@ mod tests { .should_evict_from_mem( current_age, &Arc::new(AccountMapEntry::new( - vec![(0, 0.0)], + SlotList::from([(0, 0.0)]), ref_count, AccountMapEntryMeta::default() )), @@ -1767,19 +1971,19 @@ mod tests { } #[test] - fn test_update_slot_list_other() { + fn test_update_slot_list_other_reclaim_old_slots() { solana_logger::setup(); - let reclaim = UpsertReclaim::PopulateReclaims; - let new_slot = 0; + let reclaim = UpsertReclaim::ReclaimOldSlots; + let new_slot = 5; let info = 1; let other_value = info + 1; let at_new_slot = (new_slot, info); let unique_other_slot = new_slot + 1; for other_slot in [Some(new_slot), Some(unique_other_slot), None] { - let mut reclaims = Vec::default(); - let mut slot_list = Vec::default(); + let mut reclaims = ReclaimsSlotList::new(); + let mut slot_list = SlotList::new(); // upserting into empty slot_list, so always addref - assert!( + assert_eq!( InMemAccountsIndex::::update_slot_list( &mut slot_list, new_slot, @@ -1788,22 +1992,23 @@ mod tests { &mut reclaims, reclaim ), + 1, "other_slot: {other_slot:?}" ); - assert_eq!(slot_list, vec![at_new_slot]); + assert_eq!(slot_list, SlotList::from([at_new_slot])); assert!(reclaims.is_empty()); } // replace other - let mut slot_list = vec![(unique_other_slot, other_value)]; - let expected_reclaims = slot_list.clone(); + let mut slot_list = SlotList::from([(unique_other_slot, other_value)]); + let expected_reclaims = ReclaimsSlotList::from(slot_list.as_slice()); let other_slot = Some(unique_other_slot); - let mut reclaims = Vec::default(); - assert!( + let mut reclaims = ReclaimsSlotList::new(); + assert_eq!( // upserting into slot_list that does NOT contain an entry at 'new_slot' // but, it DOES contain an entry at other_slot, so we do NOT add-ref. 
The assumption is that 'other_slot' is going away // and that the previously held add-ref is now used by 'new_slot' - !InMemAccountsIndex::::update_slot_list( + InMemAccountsIndex::::update_slot_list( &mut slot_list, new_slot, info, @@ -1811,28 +2016,37 @@ mod tests { &mut reclaims, reclaim ), + 0, "other_slot: {other_slot:?}" ); - assert_eq!(slot_list, vec![at_new_slot]); + assert_eq!(slot_list, SlotList::from([at_new_slot])); assert_eq!(reclaims, expected_reclaims); // nothing will exist at this slot let missing_other_slot = unique_other_slot + 1; let ignored_slot = 10; // bigger than is used elsewhere in the test let ignored_value = info + 10; + let reclaimed_slot = 1; // less than is used elsewhere in the test + let reclaimed_value = info + 10; - let mut possible_initial_slot_list_contents; // build a list of possible contents in the slot_list prior to calling 'update_slot_list' - { - // up to 3 ignored slot account_info (ignored means not 'new_slot', not 'other_slot', but different slot #s which could exist in the slot_list initially) - possible_initial_slot_list_contents = (0..3) - .map(|i| (ignored_slot + i, ignored_value + i)) - .collect::>(); - // account_info that already exists in the slot_list AT 'new_slot' + let possible_initial_slot_list_contents = { + let mut possible_initial_slot_list_contents = Vec::new(); + + // Add ignored slot account_info entries (slots with larger slot #s than 'new_slot' or 'other_slot') + possible_initial_slot_list_contents + .extend((0..3).map(|i| (ignored_slot + i, ignored_value + i))); + + // Add reclaimed slot account_info entries (slots with smaller slot #s than 'new_slot' or 'other_slot') + possible_initial_slot_list_contents + .extend((0..3).map(|i| (reclaimed_slot + i, reclaimed_value + i))); + + // Add account_info for 'new_slot' possible_initial_slot_list_contents.push(at_new_slot); - // account_info that already exists in the slot_list AT 'other_slot' + // Add account_info for 'other_slot' 
possible_initial_slot_list_contents.push((unique_other_slot, other_value)); - } + possible_initial_slot_list_contents + }; /* * loop over all possible permutations of 'possible_initial_slot_list_contents' @@ -1875,10 +2089,10 @@ mod tests { let mut slot_list = content_source_indexes .iter() .map(|i| possible_initial_slot_list_contents[*i]) - .collect::>(); + .collect::>(); let mut expected = slot_list.clone(); let original = slot_list.clone(); - let mut reclaims = Vec::default(); + let mut reclaims = ReclaimsSlotList::new(); let result = InMemAccountsIndex::::update_slot_list( &mut slot_list, @@ -1889,23 +2103,19 @@ mod tests { reclaim, ); - // calculate expected results - let mut expected_reclaims = Vec::default(); - // addref iff the slot_list did NOT previously contain an entry at 'new_slot' and it also did not contain an entry at 'other_slot' - let expected_result = !expected - .iter() - .any(|(slot, _info)| slot == &new_slot || Some(*slot) == other_slot); - { - // this is the logical equivalent of 'InMemAccountsIndex::update_slot_list', but slower (and ignoring addref) - expected.retain(|(slot, info)| { - let retain = slot != &new_slot && Some(*slot) != other_slot; - if !retain { - expected_reclaims.push((*slot, *info)); - } - retain - }); - expected.push((new_slot, info)); - } + // calculate expected reclaims + let mut expected_reclaims = ReclaimsSlotList::new(); + expected.retain(|(slot, info)| { + let retain = *slot > new_slot; + if !retain { + expected_reclaims.push((*slot, *info)); + } + retain + }); + expected.push((new_slot, info)); + + // Calculate the expected ref count change. It is expected to be 1 - the number of reclaims + let expected_result = 1 - expected_reclaims.len() as i32; assert_eq!( expected_result, result, "return value different. 
other: {other_slot:?}, {expected:?}, \ @@ -1930,7 +2140,7 @@ mod tests { } } } - assert_eq!(attempts, 652); // complicated permutations, so make sure we ran the right # + assert_eq!(attempts, 219202); // complicated permutations, so make sure we ran the right # } #[should_panic(expected = "slot_list has slot in slot_list but is not replacing it")] @@ -1939,8 +2149,8 @@ mod tests { fn test_update_slot_list_new_slot_duplicate_panic(slot_to_replace: u64) { let new_slot = 1; // This slot already exists in the list let old_slot = 2; // This slot already exists in the list - let mut slot_list = vec![(new_slot, 0), (old_slot, 0)]; - let mut reclaims = Vec::default(); + let mut slot_list = SlotList::from_iter([(new_slot, 0u64), (old_slot, 0)]); + let mut reclaims = ReclaimsSlotList::new(); let new_info = 1; // Attempt to update the slot list with a duplicate slot, which should trigger the panic @@ -2001,7 +2211,7 @@ mod tests { { // add an entry with an empty slot list - let val = Arc::new(AccountMapEntry::::default()); + let val = Arc::new(AccountMapEntry::::empty_for_tests()); map.insert(key, val); let entry = map.entry(key); assert_matches!(entry, Entry::Occupied(_)); @@ -2015,7 +2225,7 @@ mod tests { { // add an entry with a NON empty slot list - it will NOT get removed - let val = Arc::new(AccountMapEntry::::default()); + let val = Arc::new(AccountMapEntry::::empty_for_tests()); val.slot_list.write().unwrap().push((1, 1)); map.insert(key, val); // does NOT remove it since it has a non-empty slot list @@ -2028,9 +2238,9 @@ mod tests { #[test] fn test_lock_and_update_slot_list() { - let test = AccountMapEntry::::default(); + let test = AccountMapEntry::::empty_for_tests(); let info = 65; - let mut reclaims = Vec::default(); + let mut reclaims = ReclaimsSlotList::new(); // first upsert, should increase let len = InMemAccountsIndex::::lock_and_update_slot_list( &test, diff --git a/accounts-db/src/accounts_index/iter.rs b/accounts-db/src/accounts_index/iter.rs index 
05fa52a5e1ba11..6be62130b11aef 100644 --- a/accounts-db/src/accounts_index/iter.rs +++ b/accounts-db/src/accounts_index/iter.rs @@ -105,6 +105,7 @@ mod tests { super::{secondary::AccountSecondaryIndexes, UpsertReclaim}, *, }, + crate::accounts_index::ReclaimsSlotList, solana_account::AccountSharedData, std::ops::Range, }; @@ -122,7 +123,7 @@ mod tests { for key in pubkeys { let slot = 0; let value = true; - let mut gc = Vec::new(); + let mut gc = ReclaimsSlotList::new(); index.upsert( slot, slot, @@ -161,7 +162,7 @@ mod tests { index.add_root(0); let mut iter = index.iter(None::<&Range>, AccountsIndexPubkeyIterOrder::Sorted); assert!(iter.next().is_none()); - let mut gc = vec![]; + let mut gc = ReclaimsSlotList::new(); index.upsert( 0, 0, diff --git a/accounts-db/src/accounts_index/secondary.rs b/accounts-db/src/accounts_index/secondary.rs index a09a42791b5fae..dfadb2b2ef05b6 100644 --- a/accounts-db/src/accounts_index/secondary.rs +++ b/accounts-db/src/accounts_index/secondary.rs @@ -13,7 +13,7 @@ use { }, }; -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct AccountSecondaryIndexes { pub keys: Option, pub indexes: HashSet, diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index 4e77f7b5a1aa62..862d73c5d4eb3a 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -15,6 +15,7 @@ use { }, active_stats::ActiveStatItem, storable_accounts::{StorableAccounts, StorableAccountsBySlot}, + u64_align, }, rand::{thread_rng, Rng}, rayon::prelude::{IntoParallelRefIterator, ParallelIterator}, @@ -162,7 +163,19 @@ impl AncientSlotInfos { self.shrink_indexes.sort_unstable_by(|l, r| { let amount_shrunk = |index: &usize| { let item = &self.all_infos[*index]; - item.capacity - item.alive_bytes + // alive_bytes assumes the accounts are aligned. `capacity` may + // not be aligned for the last account. Therefore, we need to + // align it. 
+ let aligned_capacity = u64_align!(item.capacity as usize) as u64; + if aligned_capacity < item.alive_bytes { + // should not happen, but if it does, submit warn log it and continue + datapoint_warn!( + "aligned_capacity_less_than_alive_bytes", + ("aligned_capacity", aligned_capacity, i64), + ("alive_bytes", item.alive_bytes, i64) + ); + } + item.capacity.saturating_sub(item.alive_bytes) }; amount_shrunk(r).cmp(&amount_shrunk(l)) }); @@ -655,7 +668,7 @@ impl AccountsDb { Ordering::Relaxed, ); - self.thread_pool_clean.install(|| { + self.thread_pool_background.install(|| { packer.par_iter().for_each(|(target_slot, pack)| { let mut write_ancient_accounts_local = WriteAncientAccounts::default(); self.write_one_packed_storage( @@ -1109,8 +1122,10 @@ pub mod tests { ShrinkCollectRefs, }, accounts_file::StorageAccess, - accounts_index::{AccountsIndexScanResult, ScanFilter, UpsertReclaim}, - append_vec::aligned_stored_size, + accounts_index::{ + AccountsIndexScanResult, ReclaimsSlotList, RefCount, ScanFilter, UpsertReclaim, + }, + append_vec::{self, aligned_stored_size}, storable_accounts::StorableAccountsBySlot, }, rand::seq::SliceRandom as _, @@ -1595,14 +1610,7 @@ pub mod tests { let storage = db.storage.get_slot_storage_entry(slot); if all_slots_shrunk { assert!(storage.is_some()); - // Here we use can_append() as a proxy to assert the backup storage of the accounts after shrinking. - // When storage_access is set to `File`, after shrinking an ancient slot, the backup storage should be - // open as File, which means can_append() will return false. - // When storage_access is set to `Mmap`, backup storage is still Mmap, and can_append() will return true. 
- assert_eq!( - storage.unwrap().accounts.can_append(), - storage_access == StorageAccess::Mmap - ); + assert!(!storage.unwrap().has_accounts()); } else { assert!(storage.is_none()); } @@ -1782,10 +1790,10 @@ pub mod tests { ); assert!(db.accounts_index.purge_exact( &pk, - &[storage.slot()] + [storage.slot()] .into_iter() .collect::>(), - &mut Vec::default() + &mut ReclaimsSlotList::new() )); }); } @@ -2099,13 +2107,15 @@ pub mod tests { .map(|storage| storage.id()) .collect::>() ); + let mut reader = append_vec::new_scan_accounts_reader(); + // assert that we wrote the 2_ref account to the newly shrunk append vec let shrink_in_progress = shrinks_in_progress.first().unwrap().1; let mut count = 0; shrink_in_progress .new_storage() .accounts - .scan_accounts(|_, _| { + .scan_accounts(&mut reader, |_offset, _| { count += 1; }) .expect("must scan accounts storage"); @@ -2266,10 +2276,11 @@ pub mod tests { (*account.pubkey(), account.to_account_shared_data()) }) .unwrap(); + let mut reader = append_vec::new_scan_accounts_reader(); let mut count = 0; storage .accounts - .scan_accounts(|_, _| { + .scan_accounts(&mut reader, |_, _| { count += 1; }) .expect("must scan accounts storage"); @@ -3206,6 +3217,8 @@ pub mod tests { one.first().unwrap().1.old_storage().id(), storages[combine_into].id() ); + let mut reader = append_vec::new_scan_accounts_reader(); + // make sure the single new append vec contains all the same accounts let mut two = Vec::default(); one.first() @@ -3213,7 +3226,7 @@ pub mod tests { .1 .new_storage() .accounts - .scan_accounts(|_offset, meta| { + .scan_accounts(&mut reader, |_offset, meta| { two.push((*meta.pubkey(), meta.to_account_shared_data())); }) .expect("must scan accounts storage"); @@ -3728,8 +3741,8 @@ pub mod tests { .map(|_| solana_pubkey::new_rand()) .collect::>(); // how many of `many_ref_accounts` should be found in the index with ref_count=1 - let mut expected_ref_counts_before_unref = HashMap::::default(); - let mut 
expected_ref_counts_after_unref = HashMap::::default(); + let mut expected_ref_counts_before_unref = HashMap::::default(); + let mut expected_ref_counts_after_unref = HashMap::::default(); pubkeys_to_unref.iter().for_each(|k| { for slot in 0..2 { @@ -3741,7 +3754,7 @@ pub mod tests { &empty_account, &crate::accounts_index::AccountSecondaryIndexes::default(), AccountInfo::default(), - &mut Vec::default(), + &mut ReclaimsSlotList::new(), UpsertReclaim::IgnoreReclaims, ); } diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index de748729bb20f3..03f7eaeb238d06 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -2,7 +2,7 @@ //! //! For more information, see: //! -//! +//! mod meta; pub mod test_utils; @@ -19,13 +19,13 @@ use { account_info::Offset, account_storage::stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData}, accounts_file::{ - AccountsFileError, InternalsForArchive, MatchAccountOwnerError, Result, StorageAccess, - StoredAccountsInfo, + AccountsFileError, InternalsForArchive, Result, StorageAccess, StoredAccountsInfo, }, buffered_reader::{ - BufReaderWithOverflow, BufferedReader, FileBufRead as _, RequiredLenBufRead as _, Stack, + BufReaderWithOverflow, BufferedReader, FileBufRead as _, RequiredLenBufFileRead, + RequiredLenBufRead as _, Stack, }, - file_io::read_into_buffer, + file_io::{read_into_buffer, write_buffer_to_file}, is_zero_lamport::IsZeroLamport, storable_accounts::StorableAccounts, u64_align, @@ -40,13 +40,13 @@ use { self, convert::TryFrom, fs::{remove_file, File, OpenOptions}, - io::{BufRead, Seek, SeekFrom, Write}, + io::{self, BufRead, Seek, SeekFrom, Write}, mem::{self, MaybeUninit}, path::{Path, PathBuf}, ptr, slice, sync::{ atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, - Mutex, + Mutex, MutexGuard, }, }, thiserror::Error, @@ -124,38 +124,6 @@ impl<'a> ValidSlice<'a> { } } -/// info from an entry useful for building an index -pub(crate) struct IndexInfo { - 
/// size of entry, aligned to next u64 - /// This matches the return of `get_account` - pub stored_size_aligned: usize, - /// info on the entry - pub index_info: IndexInfoInner, -} - -/// info from an entry useful for building an index -pub(crate) struct IndexInfoInner { - /// offset to this entry - pub offset: usize, - pub pubkey: Pubkey, - pub lamports: u64, - pub data_len: u64, -} - -impl IsZeroLamport for IndexInfoInner { - #[inline(always)] - fn is_zero_lamport(&self) -> bool { - self.lamports == 0 - } -} - -impl IsZeroLamport for IndexInfo { - #[inline(always)] - fn is_zero_lamport(&self) -> bool { - self.index_info.is_zero_lamport() - } -} - /// offsets to help navigate the persisted format of `AppendVec` #[derive(Debug)] struct AccountOffsets { @@ -172,10 +140,40 @@ enum AppendVecFileBacking { File(File), } +/// Validates and serializes appends (when `append_guard` is called) such that only +/// writable AppendVec is updated and only from a single thread at a time. +#[derive(Debug)] +enum ReadWriteState { + ReadOnly, + Writable { + /// A lock used to serialize append operations. + append_lock: Mutex<()>, + }, +} + +impl ReadWriteState { + fn new(allow_writes: bool) -> Self { + if allow_writes { + Self::Writable { + append_lock: Mutex::new(()), + } + } else { + Self::ReadOnly + } + } + + fn append_guard(&self) -> MutexGuard<()> { + match self { + Self::ReadOnly => panic!("append not allowed in read-only state"), + Self::Writable { append_lock } => append_lock.lock().unwrap(), + } + } +} + /// A thread-safe, file-backed block of memory used to store `Account` instances. Append operations -/// are serialized such that only one thread updates the internal `append_lock` at a time. No -/// restrictions are placed on reading. That is, one may read items from one thread while another -/// is appending new items. +/// are serialized using `read_write_state`'s internal lock such that only one thread updates the +/// file at a time. 
No restrictions are placed on reading. That is, one may read items from one +/// thread while another is appending new items. #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug)] pub struct AppendVec { @@ -185,8 +183,8 @@ pub struct AppendVec { /// access the file data backing: AppendVecFileBacking, - /// A lock used to serialize append operations. - append_lock: Mutex<()>, + /// Guards and serializes writes if allowed + read_write_state: ReadWriteState, /// The number of bytes used to store items, not the number of items. current_len: AtomicUsize, @@ -256,7 +254,12 @@ impl Drop for AppendVec { } impl AppendVec { - pub fn new(file: impl Into, create: bool, size: usize) -> Self { + pub fn new( + file: impl Into, + create: bool, + size: usize, + storage_access: StorageAccess, + ) -> Self { let file = file.into(); let initial_len = 0; AppendVec::sanitize_len_and_size(initial_len, size).unwrap(); @@ -289,26 +292,36 @@ impl AppendVec { data.rewind().unwrap(); data.flush().unwrap(); - //UNSAFE: Required to create a Mmap - let mmap = unsafe { MmapMut::map_mut(&data) }; - let mmap = mmap.unwrap_or_else(|err| { - panic!( - "Failed to map the data file (size: {size}): {err}. Please increase sysctl \ - vm.max_map_count or equivalent for your platform.", - ); - }); + let backing = match storage_access { + StorageAccess::Mmap => { + //UNSAFE: Required to create a Mmap + let mmap = unsafe { MmapMut::map_mut(&data) }; + let mmap = mmap.unwrap_or_else(|err| { + panic!( + "Failed to map the data file (size: {size}): {err}. 
Please increase \ + sysctl vm.max_map_count or equivalent for your platform.", + ); + }); + APPEND_VEC_STATS + .open_as_mmap + .fetch_add(1, Ordering::Relaxed); + AppendVecFileBacking::Mmap(mmap) + } + StorageAccess::File => { + APPEND_VEC_STATS + .open_as_file_io + .fetch_add(1, Ordering::Relaxed); + AppendVecFileBacking::File(data) + } + }; APPEND_VEC_STATS.files_open.fetch_add(1, Ordering::Relaxed); - APPEND_VEC_STATS - .open_as_mmap - .fetch_add(1, Ordering::Relaxed); - AppendVec { path: file, - backing: AppendVecFileBacking::Mmap(mmap), - // This mutex forces append to be single threaded, but concurrent with reads - // See UNSAFE usage in `append_ptr` - append_lock: Mutex::new(()), + backing, + // writable state's mutex forces append to be single threaded, but concurrent with + // reads. See UNSAFE usage in `append_ptr` + read_write_state: ReadWriteState::new(true), current_len: AtomicUsize::new(initial_len), file_size: size as u64, remove_file_on_drop: AtomicBool::new(true), @@ -341,6 +354,7 @@ impl AppendVec { aligned_stored_size(0) * count } + /// Flushes contents to disk pub fn flush(&self) -> Result<()> { // Check to see if we're actually dirty before flushing. let should_flush = self.is_dirty.swap(false, Ordering::AcqRel); @@ -358,44 +372,39 @@ impl AppendVec { Ok(()) } + #[cfg(feature = "dev-context-only-utils")] pub fn reset(&self) { - // This mutex forces append to be single threaded, but concurrent with reads - // See UNSAFE usage in `append_ptr` - let _lock = self.append_lock.lock().unwrap(); + // Writable state's mutex forces append to be single threaded, but concurrent + // with reads. See UNSAFE usage in `append_ptr` + let _lock = self.read_write_state.append_guard(); self.current_len.store(0, Ordering::Release); } - /// when we can use file i/o as opposed to mmap, this is the trigger to tell us - /// that no more appending will occur and we can close the initial mmap. 
- pub(crate) fn reopen_as_readonly(&self) -> Option { - match &self.backing { - AppendVecFileBacking::File(_file) => { - // already a file, so already read-only - None - } - AppendVecFileBacking::Mmap(_mmap) => { - // we are an mmap, so re-open as a file - // we are re-opening the file, so don't remove the file on disk when the old mmapped one is dropped - self.remove_file_on_drop.store(false, Ordering::Release); - - // add a memory barrier to ensure the the last mmap writes - // happen before the first file-io reads - std::sync::atomic::fence(Ordering::AcqRel); - - // The file should have already been sanitized. Don't need to check when we open the file again. - let mut new = AppendVec::new_from_file_unchecked( - self.path.clone(), - self.len(), - StorageAccess::File, - ) + /// Return AppendVec opened in read-only file-io mode or `None` if it already is such + pub(crate) fn reopen_as_readonly_file_io(&self) -> Option { + if matches!(self.read_write_state, ReadWriteState::ReadOnly) + && matches!(self.backing, AppendVecFileBacking::File(_)) + { + // Early return if already in read-only mode *and* already a file-io + return None; + } + + // we are re-opening the file, so don't remove the file on disk when the old one is dropped + self.remove_file_on_drop.store(false, Ordering::Release); + + // add a memory barrier to ensure the the last mmap writes + // happen before the first file-io reads + std::sync::atomic::fence(Ordering::AcqRel); + + // The file should have already been sanitized. Don't need to check when we open the file again. 
+ let mut new = + AppendVec::new_from_file_unchecked(self.path.clone(), self.len(), StorageAccess::File) .ok()?; - if self.is_dirty.swap(false, Ordering::AcqRel) { - // *move* the dirty-ness to the new append vec - *new.is_dirty.get_mut() = true; - } - Some(new) - } + if self.is_dirty.swap(false, Ordering::AcqRel) { + // *move* the dirty-ness to the new append vec + *new.is_dirty.get_mut() = true; } + Some(new) } /// how many more bytes can be stored in this append vec @@ -404,6 +413,7 @@ impl AppendVec { .saturating_sub(u64_align!(self.len()) as u64) } + /// Returns the number of bytes, *not items*, used in the AppendVec pub fn len(&self) -> usize { self.current_len.load(Ordering::Acquire) } @@ -412,6 +422,7 @@ impl AppendVec { self.len() == 0 } + /// Returns the total number of bytes, *not items*, the AppendVec can hold pub fn capacity(&self) -> u64 { self.file_size } @@ -468,7 +479,9 @@ impl AppendVec { } } - /// Creates an appendvec from file without performing sanitize checks or counting the number of accounts + /// Creates an appendvec from existing file in read-only mode and without full data checks + /// + /// Validation of account data and counting the number of accounts is skipped. 
pub fn new_from_file_unchecked( path: impl Into, current_len: usize, @@ -478,15 +491,16 @@ impl AppendVec { let file_size = std::fs::metadata(&path)?.len(); Self::sanitize_len_and_size(current_len, file_size as usize)?; + // AppendVec is in read-only mode, but mmap access requires file to be writable let data = OpenOptions::new() .read(true) - .write(true) + .write(storage_access == StorageAccess::Mmap) .create(false) .open(&path)?; - if storage_access == StorageAccess::File { - APPEND_VEC_STATS.files_open.fetch_add(1, Ordering::Relaxed); + APPEND_VEC_STATS.files_open.fetch_add(1, Ordering::Relaxed); + if storage_access == StorageAccess::File { APPEND_VEC_STATS .open_as_file_io .fetch_add(1, Ordering::Relaxed); @@ -494,7 +508,7 @@ impl AppendVec { return Ok(AppendVec { path, backing: AppendVecFileBacking::File(data), - append_lock: Mutex::new(()), + read_write_state: ReadWriteState::ReadOnly, current_len: AtomicUsize::new(current_len), file_size, remove_file_on_drop: AtomicBool::new(true), @@ -514,8 +528,6 @@ impl AppendVec { result? }; - APPEND_VEC_STATS.files_open.fetch_add(1, Ordering::Relaxed); - APPEND_VEC_STATS .open_as_mmap .fetch_add(1, Ordering::Relaxed); @@ -523,7 +535,7 @@ impl AppendVec { Ok(AppendVec { path, backing: AppendVecFileBacking::Mmap(mmap), - append_lock: Mutex::new(()), + read_write_state: ReadWriteState::ReadOnly, current_len: AtomicUsize::new(current_len), file_size, remove_file_on_drop: AtomicBool::new(true), @@ -584,7 +596,7 @@ impl AppendVec { /// Copy `len` bytes from `src` to the first 64-byte boundary after position `offset` of /// the internal buffer. Then update `offset` to the first byte after the copied data. 
- fn append_ptr(&self, offset: &mut usize, src: *const u8, len: usize) { + fn append_ptr(&self, offset: &mut usize, src: *const u8, len: usize) -> io::Result<()> { let pos = u64_align!(*offset); match &self.backing { AppendVecFileBacking::Mmap(mmap) => { @@ -596,19 +608,26 @@ impl AppendVec { let dst = data.as_ptr() as *mut _; ptr::copy(src, dst, len); }; - *offset = pos + len; } - AppendVecFileBacking::File(_file) => { - unimplemented!(); + AppendVecFileBacking::File(file) => { + // Safety: caller should ensure the passed pointer and length are valid. + let data = unsafe { slice::from_raw_parts(src, len) }; + write_buffer_to_file(file, data, pos as u64)?; } } + *offset = pos + len; + Ok(()) } /// Copy each value in `vals`, in order, to the first 64-byte boundary after position `offset`. /// If there is sufficient space, then update `offset` and the internal `current_len` to the /// first byte after the copied data and return the starting position of the copied data. /// Otherwise return None and leave `offset` unchanged. - fn append_ptrs_locked(&self, offset: &mut usize, vals: &[(*const u8, usize)]) -> Option { + fn append_ptrs_locked( + &self, + offset: &mut usize, + vals: &[(*const u8, usize)], + ) -> io::Result> { let mut end = *offset; for val in vals { end = u64_align!(end); @@ -616,15 +635,15 @@ impl AppendVec { } if (self.file_size as usize) < end { - return None; + return Ok(None); } let pos = u64_align!(*offset); for val in vals { - self.append_ptr(offset, val.0, val.1) + self.append_ptr(offset, val.0, val.1)? } self.current_len.store(*offset, Ordering::Release); - Some(pos) + Ok(Some(pos)) } /// Return a reference to the type at `offset` if its data doesn't overrun the internal buffer. @@ -892,28 +911,6 @@ impl AppendVec { } } - /// Return Ok(index_of_matching_owner) if the account owner at `offset` is one of the pubkeys in `owners`. 
- /// Return Err(MatchAccountOwnerError::NoMatch) if the account has 0 lamports or the owner is not one of - /// the pubkeys in `owners`. - /// Return Err(MatchAccountOwnerError::UnableToLoad) if the `offset` value causes a data overrun. - pub fn account_matches_owners( - &self, - offset: usize, - owners: &[Pubkey], - ) -> std::result::Result { - self.get_stored_account_no_data_callback(offset, |stored_account_meta| { - if stored_account_meta.lamports() == 0 { - Err(MatchAccountOwnerError::NoMatch) - } else { - owners - .iter() - .position(|entry| stored_account_meta.owner() == entry) - .ok_or(MatchAccountOwnerError::NoMatch) - } - }) - .unwrap_or(Err(MatchAccountOwnerError::UnableToLoad)) - } - #[cfg(test)] pub fn get_account_test( &self, @@ -922,7 +919,7 @@ impl AppendVec { let data_len = self.get_account_data_lens(&[offset]); let sizes: usize = data_len .iter() - .map(|len| self.calculate_stored_size(*len)) + .map(|len| AppendVec::calculate_stored_size(*len)) .sum(); let result = self.get_stored_account_meta_callback(offset, |r_callback| { let r2 = self.get_account_shared_data(offset); @@ -1000,11 +997,12 @@ impl AppendVec { /// /// Prefer scan_accounts_without_data() when account data is not needed, /// as it can potentially read less and be faster. - pub fn scan_accounts( - &self, + pub(crate) fn scan_accounts<'a>( + &'a self, + reader: &mut impl RequiredLenBufFileRead<'a>, mut callback: impl for<'local> FnMut(Offset, StoredAccountInfo<'local>), ) -> Result<()> { - self.scan_accounts_stored_meta(|stored_account_meta| { + self.scan_accounts_stored_meta(reader, |stored_account_meta| { let offset = stored_account_meta.offset(); let account = StoredAccountInfo { pubkey: stored_account_meta.pubkey(), @@ -1023,8 +1021,9 @@ impl AppendVec { /// Prefer scan_accounts() when possible, as it does not contain file format /// implementation details, and thus potentially can read less and be faster. 
#[allow(clippy::blocks_in_conditions)] - pub fn scan_accounts_stored_meta( - &self, + pub(crate) fn scan_accounts_stored_meta<'a>( + &'a self, + reader: &mut impl RequiredLenBufFileRead<'a>, mut callback: impl for<'local> FnMut(StoredAccountMeta<'local>), ) -> Result<()> { match &self.backing { @@ -1045,18 +1044,8 @@ impl AppendVec { {} } AppendVecFileBacking::File(file) => { - // 128KiB covers a reasonably large distribution of typical account sizes. - // In a recent sample, 99.98% of accounts' data lengths were less than or equal to 128KiB. - const MIN_CAPACITY: usize = 1024 * 128; - const MAX_CAPACITY: usize = - STORE_META_OVERHEAD + MAX_PERMITTED_DATA_LENGTH as usize; - const BUFFER_SIZE: usize = PAGE_SIZE * 8; - let self_len = self.len(); - let mut reader = BufReaderWithOverflow::new( - BufferedReader::>::new_stack(self.len(), file), - MIN_CAPACITY.min(self_len), - MAX_CAPACITY.min(self_len), - ); + reader.set_file(file, self.len())?; + let mut min_buf_len = STORE_META_OVERHEAD; loop { let offset = reader.get_file_offset(); @@ -1099,7 +1088,7 @@ impl AppendVec { /// Calculate the amount of storage required for an account with the passed /// in data_len - pub(crate) fn calculate_stored_size(&self, data_len: usize) -> usize { + pub(crate) fn calculate_stored_size(data_len: usize) -> usize { aligned_stored_size(data_len) } @@ -1208,7 +1197,8 @@ impl AppendVec { AppendVecFileBacking::File(file) => { // Heuristic observed in benchmarking that maintains a reasonable balance between syscalls and data waste const BUFFER_SIZE: usize = PAGE_SIZE * 4; - let mut reader = BufferedReader::>::new_stack(self_len, file); + let mut reader = + BufferedReader::>::new_stack().with_file(file, self_len); const REQUIRED_READ_LEN: usize = mem::size_of::() + mem::size_of::(); loop { @@ -1258,7 +1248,7 @@ impl AppendVec { accounts: &impl StorableAccounts<'a>, skip: usize, ) -> Option { - let _lock = self.append_lock.lock().unwrap(); + let _lock = self.read_write_state.append_guard(); 
let mut offset = self.len(); let len = accounts.len(); // Here we have `len - skip` number of accounts. The +1 extra capacity @@ -1294,7 +1284,10 @@ impl AppendVec { (hash_ptr, mem::size_of::()), (data_ptr, stored_meta.data_len as usize), ]; - if let Some(start_offset) = self.append_ptrs_locked(&mut offset, &ptrs) { + if let Some(start_offset) = self + .append_ptrs_locked(&mut offset, &ptrs) + .expect("must append data to append_vec") + { offsets.push(start_offset) } else { stop = true; @@ -1324,15 +1317,6 @@ impl AppendVec { }) } - // NOTE: Only used by ancient append vecs "append" method, which is test-only now. - #[cfg(test)] - pub(crate) fn can_append(&self) -> bool { - match &self.backing { - AppendVecFileBacking::File(_file) => false, - AppendVecFileBacking::Mmap(_mmap) => true, - } - } - /// Returns the way to access this accounts file when archiving pub(crate) fn internals_for_archive(&self) -> InternalsForArchive { match &self.backing { @@ -1343,6 +1327,20 @@ impl AppendVec { } } +/// Create a reusable buffered reader tuned for scanning storages with account data. +pub(crate) fn new_scan_accounts_reader<'a>() -> impl RequiredLenBufFileRead<'a> { + // 128KiB covers a reasonably large distribution of typical account sizes. + // In a recent sample, 99.98% of accounts' data lengths were less than or equal to 128KiB. + const MIN_CAPACITY: usize = 1024 * 128; + const MAX_CAPACITY: usize = STORE_META_OVERHEAD + MAX_PERMITTED_DATA_LENGTH as usize; + const BUFFER_SIZE: usize = PAGE_SIZE * 8; + BufReaderWithOverflow::new( + BufferedReader::>::new_stack(), + MIN_CAPACITY, + MAX_CAPACITY, + ) +} + /// The per-account hash, stored in the AppendVec. /// /// This field is now obsolete, but it still lives in the file format. 
@@ -1364,14 +1362,10 @@ pub mod tests { solana_account::{Account, AccountSharedData}, solana_clock::Slot, std::{mem::ManuallyDrop, time::Instant}, - test_case::test_case, + test_case::{test_case, test_matrix}, }; impl AppendVec { - pub(crate) fn set_current_len_for_tests(&self, len: usize) { - self.current_len.store(len, Ordering::Release); - } - fn append_account_test(&self, data: &(StoredMeta, AccountSharedData)) -> Option { let slot_ignored = Slot::MAX; let accounts = [(&data.0.pubkey, &data.1)]; @@ -1426,11 +1420,12 @@ pub mod tests { assert_eq!(&def1, &def2); } - #[test] + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] #[should_panic(expected = "AppendVecError(FileSizeTooSmall(0))")] - fn test_append_vec_new_bad_size() { + fn test_append_vec_new_bad_size(storage_access: StorageAccess) { let path = get_append_vec_path("test_append_vec_new_bad_size"); - let _av = AppendVec::new(&path.path, true, 0); + let _av = AppendVec::new(&path.path, true, 0, storage_access); } #[test_case(StorageAccess::Mmap)] @@ -1490,10 +1485,11 @@ pub mod tests { assert_matches!(result, Err(ref message) if message.to_string().contains("is larger than file size (1048576)")); } - #[test] - fn test_append_vec_one() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_append_vec_one(storage_access: StorageAccess) { let path = get_append_vec_path("test_append"); - let av = AppendVec::new(&path.path, true, 1024 * 1024); + let av = AppendVec::new(&path.path, true, 1024 * 1024, storage_access); let account = create_test_account(0); let index = av.append_account_test(&account).unwrap(); assert_eq!(av.get_account_test(index).unwrap(), account); @@ -1514,10 +1510,11 @@ pub mod tests { assert_eq!(av.get_account_test(index), None); } - #[test] - fn test_append_vec_one_with_data() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_append_vec_one_with_data(storage_access: StorageAccess) { let path = 
get_append_vec_path("test_append"); - let av = AppendVec::new(&path.path, true, 1024 * 1024); + let av = AppendVec::new(&path.path, true, 1024 * 1024, storage_access); let data_len = 1; let account = create_test_account(data_len); let index = av.append_account_test(&account).unwrap(); @@ -1530,12 +1527,13 @@ pub mod tests { truncate_and_test(av, index); } - #[test] - fn test_remaining_bytes() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_remaining_bytes(storage_access: StorageAccess) { let path = get_append_vec_path("test_append"); let sz = 1024 * 1024; let sz64 = sz as u64; - let av = AppendVec::new(&path.path, true, sz); + let av = AppendVec::new(&path.path, true, sz, storage_access); assert_eq!(av.capacity(), sz64); assert_eq!(av.remaining_bytes(), sz64); @@ -1570,10 +1568,11 @@ pub mod tests { assert_eq!(av.remaining_bytes(), sz64 - u64_align!(av_len) as u64); } - #[test] - fn test_append_vec_data() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_append_vec_data(storage_access: StorageAccess) { let path = get_append_vec_path("test_append_data"); - let av = AppendVec::new(&path.path, true, 1024 * 1024); + let av = AppendVec::new(&path.path, true, 1024 * 1024, storage_access); let account = create_test_account(5); let index = av.append_account_test(&account).unwrap(); assert_eq!(av.get_account_test(index).unwrap(), account); @@ -1583,46 +1582,6 @@ pub mod tests { assert_eq!(av.get_account_test(index1).unwrap(), account1); } - #[test] - fn test_account_matches_owners() { - let path = get_append_vec_path("test_append_data"); - let av = AppendVec::new(&path.path, true, 1024 * 1024); - let owners: Vec = (0..2).map(|_| Pubkey::new_unique()).collect(); - - let mut account = create_test_account(5); - account.1.set_owner(owners[0]); - let index = av.append_account_test(&account).unwrap(); - assert_eq!(av.account_matches_owners(index, &owners), Ok(0)); - - let mut account1 = create_test_account(6); 
- account1.1.set_owner(owners[1]); - let index1 = av.append_account_test(&account1).unwrap(); - assert_eq!(av.account_matches_owners(index1, &owners), Ok(1)); - assert_eq!(av.account_matches_owners(index, &owners), Ok(0)); - - let mut account2 = create_test_account(6); - account2.1.set_owner(Pubkey::new_unique()); - let index2 = av.append_account_test(&account2).unwrap(); - assert_eq!( - av.account_matches_owners(index2, &owners), - Err(MatchAccountOwnerError::NoMatch) - ); - - // tests for overflow - assert_eq!( - av.account_matches_owners(usize::MAX - mem::size_of::(), &owners), - Err(MatchAccountOwnerError::UnableToLoad) - ); - - assert_eq!( - av.account_matches_owners( - usize::MAX - mem::size_of::() - mem::size_of::() + 1, - &owners - ), - Err(MatchAccountOwnerError::UnableToLoad) - ); - } - impl AppendVec { /// return how many accounts in the storage fn accounts_count(&self) -> usize { @@ -1688,7 +1647,12 @@ pub mod tests { } let path = get_append_vec_path("test_scan_accounts_stored_meta_correctness"); - let av = ManuallyDrop::new(AppendVec::new(&path.path, true, file_size)); + let av = ManuallyDrop::new(AppendVec::new( + &path.path, + true, + file_size, + StorageAccess::File, + )); let slot = 42; av.append_accounts(&(slot, test_accounts.as_slice()), 0) .unwrap(); @@ -1703,9 +1667,10 @@ pub mod tests { let av_file = AppendVec::new_from_file(&path.path, av_mmap.len(), StorageAccess::File) .unwrap() .0; + let mut reader = new_scan_accounts_reader(); for av in [&av_mmap, &av_file] { let mut index = 0; - av.scan_accounts_stored_meta(|v| { + av.scan_accounts_stored_meta(&mut reader, |v| { let (pubkey, account) = &test_accounts[index]; let recovered = v.to_account_shared_data(); assert_eq!(&recovered, account); @@ -1716,10 +1681,11 @@ pub mod tests { } } - #[test] - fn test_append_vec_append_many() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_append_vec_append_many(storage_access: StorageAccess) { let path = 
get_append_vec_path("test_append_many"); - let av = AppendVec::new(&path.path, true, 1024 * 1024); + let av = AppendVec::new(&path.path, true, 1024 * 1024, storage_access); let size = 1000; let mut indexes = vec![]; let now = Instant::now(); @@ -1735,7 +1701,7 @@ pub mod tests { let stored_size = av .get_account_data_lens(indexes.as_slice()) .iter() - .map(|len| av.calculate_stored_size(*len)) + .map(|len| AppendVec::calculate_stored_size(*len)) .sum::(); assert_eq!(sizes.iter().sum::(), stored_size); } @@ -1752,9 +1718,11 @@ pub mod tests { assert_eq!(indexes[0], 0); assert_eq!(av.accounts_count(), size); + let mut reader = new_scan_accounts_reader(); + let mut sample = 0; let now = Instant::now(); - av.scan_accounts_stored_meta(|v| { + av.scan_accounts_stored_meta(&mut reader, |v| { let account = create_test_account(sample + 1); let recovered = v.to_account_shared_data(); assert_eq!(recovered, account.1); @@ -1844,7 +1812,7 @@ pub mod tests { let path = &file.path; let accounts_len = { // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped - let av = ManuallyDrop::new(AppendVec::new(path, true, 1024 * 1024)); + let av = ManuallyDrop::new(AppendVec::new(path, true, 1024 * 1024, storage_access)); av.append_account_test(&create_test_account(10)).unwrap(); av.flush().unwrap(); @@ -1873,11 +1841,12 @@ pub mod tests { assert_matches!(result, Err(ref message) if message.to_string().contains("incorrect layout/length/data")); } - #[test] - fn test_append_vec_reset() { + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_append_vec_reset(storage_access: StorageAccess) { let file = get_append_vec_path("test_append_vec_reset"); let path = &file.path; - let av = AppendVec::new(path, true, 1024 * 1024); + let av = AppendVec::new(path, true, 1024 * 1024, storage_access); av.append_account_test(&create_test_account(10)).unwrap(); assert!(!av.is_empty()); @@ -1892,7 +1861,7 @@ pub mod tests { let path = &file.path; 
let accounts_len = { // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped - let av = ManuallyDrop::new(AppendVec::new(path, true, 1024 * 1024)); + let av = ManuallyDrop::new(AppendVec::new(path, true, 1024 * 1024, storage_access)); av.append_account_test(&create_test_account(10)).unwrap(); av.len() }; @@ -1909,13 +1878,19 @@ pub mod tests { let file = get_append_vec_path("test_append_vec_flush"); let path = &file.path; let accounts_len = { - // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped - let av = ManuallyDrop::new(AppendVec::new(path, true, 1024 * 1024)); + let av = AppendVec::new(path, true, 1024 * 1024, storage_access); av.append_account_test(&create_test_account(10)).unwrap(); - av.len() + // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped + let ro_av = ManuallyDrop::new( + av.reopen_as_readonly_file_io() + .expect("appendable AppendVec should always re-open as read-only"), + ); + ro_av.len() }; + let (av, _) = AppendVec::new_from_file(path, accounts_len, storage_access).unwrap(); - let reopen = av.reopen_as_readonly(); + let reopen = av.reopen_as_readonly_file_io(); + // even if AppendVec is already read-only, but uses mmap, it should reopen as file_io if storage_access == StorageAccess::File { assert!(reopen.is_none()); } else { @@ -1930,7 +1905,7 @@ pub mod tests { let path = &file.path; let accounts_len = { // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped - let av = ManuallyDrop::new(AppendVec::new(path, true, 1024 * 1024)); + let av = ManuallyDrop::new(AppendVec::new(path, true, 1024 * 1024, storage_access)); av.append_account_test(&create_test_account(10)).unwrap(); @@ -1969,7 +1944,7 @@ pub mod tests { // Write a valid append vec file. 
let accounts_len = { // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped - let av = ManuallyDrop::new(AppendVec::new(path, true, 1024 * 1024)); + let av = ManuallyDrop::new(AppendVec::new(path, true, 1024 * 1024, storage_access)); av.append_account_test(&create_test_account(10)).unwrap(); let offset_1 = { let mut executable_account = create_test_account(10); @@ -2043,7 +2018,12 @@ pub mod tests { let data_len: usize = 2 * PAGE_SIZE; let account = create_test_account_with(data_len); // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped - let av = ManuallyDrop::new(AppendVec::new(path, true, aligned_stored_size(data_len))); + let av = ManuallyDrop::new(AppendVec::new( + path, + true, + aligned_stored_size(data_len), + storage_access, + )); av.append_account_test(&account).unwrap(); av.flush().unwrap(); } @@ -2083,7 +2063,8 @@ pub mod tests { let temp_file = get_append_vec_path("test_get_account_sizes"); let account_offsets = { - let append_vec = AppendVec::new(&temp_file.path, true, total_stored_size); + let append_vec = + AppendVec::new(&temp_file.path, true, total_stored_size, storage_access); // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped let append_vec = ManuallyDrop::new(append_vec); let slot = 77; // the specific slot does not matter @@ -2103,7 +2084,7 @@ pub mod tests { let account_sizes = append_vec .get_account_data_lens(account_offsets.as_slice()) .iter() - .map(|len| append_vec.calculate_stored_size(*len)) + .map(|len| AppendVec::calculate_stored_size(*len)) .sum::(); assert_eq!(account_sizes, total_stored_size); } @@ -2138,8 +2119,12 @@ pub mod tests { let temp_file = get_append_vec_path("test_scan"); let account_offsets = { // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped - let append_vec = - ManuallyDrop::new(AppendVec::new(&temp_file.path, true, total_stored_size)); + let append_vec = 
ManuallyDrop::new(AppendVec::new( + &temp_file.path, + true, + total_stored_size, + storage_access, + )); let slot = 42; // the specific slot does not matter let storable_accounts: Vec<_> = std::iter::zip(&pubkeys, &accounts).collect(); let stored_accounts_info = append_vec @@ -2350,12 +2335,11 @@ pub mod tests { // Test to make sure that `is_dirty` is tracked properly // * `reopen_as_readonly()` moves `is_dirty` // * `flush()` clears `is_dirty` - #[test_case(false)] - #[test_case(true)] - fn test_is_dirty(begins_dirty: bool) { + #[test_matrix([false, true], [StorageAccess::Mmap, StorageAccess::File])] + fn test_is_dirty(begins_dirty: bool, storage_access: StorageAccess) { let file = get_append_vec_path("test_is_dirty"); - let mut av1 = AppendVec::new(&file.path, true, 1024 * 1024); + let mut av1 = AppendVec::new(&file.path, true, 1024 * 1024, storage_access); // don't delete the file when the AppendVec is dropped (let TempFile do it) *av1.remove_file_on_drop.get_mut() = false; @@ -2367,7 +2351,7 @@ pub mod tests { } assert_eq!(*av1.is_dirty.get_mut(), begins_dirty); - let mut av2 = av1.reopen_as_readonly().unwrap(); + let mut av2 = av1.reopen_as_readonly_file_io().unwrap(); // don't delete the file when the AppendVec is dropped (let TempFile do it) *av2.remove_file_on_drop.get_mut() = false; diff --git a/accounts-db/src/bucket_map_holder.rs b/accounts-db/src/bucket_map_holder.rs index 62fa2158f12165..918af01a4c71fc 100644 --- a/accounts-db/src/bucket_map_holder.rs +++ b/accounts-db/src/bucket_map_holder.rs @@ -140,13 +140,13 @@ impl + Into> BucketMapHolder return; } - // when age has incremented twice, we know that we have made it through scanning all bins since we started waiting, - // so we are then 'idle' - let end_age = self.current_age().wrapping_add(2); + let start_age = self.current_age(); loop { self.wait_dirty_or_aged .wait_timeout(Duration::from_millis(self.age_interval_ms())); - if end_age == self.current_age() { + // when age has incremented twice or 
more from the starting age, we know that we have + // made it through scanning all bins since we started waiting, so we are then 'idle' + if self.current_age().wrapping_sub(start_age) > 1 { break; } } @@ -477,7 +477,10 @@ pub mod tests { #[test] fn test_disk_index_enabled() { let bins = 1; - let config = AccountsIndexConfig::default(); + let config = AccountsIndexConfig { + index_limit_mb: IndexLimitMb::Minimal, + ..Default::default() + }; let test = BucketMapHolder::::new(bins, &config, 1); assert!(test.is_disk_index_enabled()); } diff --git a/accounts-db/src/bucket_map_holder_stats.rs b/accounts-db/src/bucket_map_holder_stats.rs index c9a78202c3eb5b..beccf637e85013 100644 --- a/accounts-db/src/bucket_map_holder_stats.rs +++ b/accounts-db/src/bucket_map_holder_stats.rs @@ -48,6 +48,7 @@ pub struct BucketMapHolderStats { pub bg_waiting_us: AtomicU64, pub bg_throttling_wait_us: AtomicU64, pub count_in_mem: AtomicUsize, + pub capacity_in_mem: AtomicUsize, pub flush_entries_updated_on_disk: AtomicU64, pub flush_entries_evicted_from_mem: AtomicU64, pub active_threads: AtomicU64, @@ -102,6 +103,23 @@ impl BucketMapHolderStats { self.count_in_mem.fetch_sub(count, Ordering::Relaxed); } + /// Updates the 'in-mem capacity' stat, given a bin's pre and post values + pub fn update_in_mem_capacity(&self, pre: usize, post: usize) { + match post.cmp(&pre) { + std::cmp::Ordering::Equal => { + // nothing to do here + } + std::cmp::Ordering::Greater => { + self.capacity_in_mem + .fetch_add(post - pre, Ordering::Relaxed); + } + std::cmp::Ordering::Less => { + self.capacity_in_mem + .fetch_sub(pre - post, Ordering::Relaxed); + } + } + } + fn ms_per_age + Into>( &self, storage: &BucketMapHolder, @@ -203,6 +221,9 @@ impl BucketMapHolderStats { let startup = storage.get_startup(); let was_startup = self.last_was_startup.swap(startup, Ordering::Relaxed); + let count_in_mem = self.count_in_mem.load(Ordering::Relaxed); + let capacity_in_mem = self.capacity_in_mem.load(Ordering::Relaxed); + 
// sum of elapsed time in each thread let mut thread_time_elapsed_ms = elapsed_ms * storage.threads as u64; if storage.is_disk_index_enabled() { @@ -234,15 +255,20 @@ impl BucketMapHolderStats { ), ); } - let count_in_mem = self.count_in_mem.load(Ordering::Relaxed); + let held_in_mem_ref_count = self.held_in_mem.ref_count.swap(0, Ordering::Relaxed); let held_in_mem_slot_list_len = self.held_in_mem.slot_list_len.swap(0, Ordering::Relaxed); - // If an entry is held in-mem due to slot list length then it has (at least) two slot - // list entries. Since `approx_size_of_one_entry()` already includes the ref count & - // metadata sizes, only add in a second slot list entry. + // If an entry is held in-mem due to ref count or slot list length, + // then assume it has two slot list entries. + // Since `approx_size_of_one_entry()` assumes 'regular' entries + // (aka ref count == 1 and slot list len == 1), and the single slot list entry is + // stored inline in the slot list itself, then when we have larger slot lists, + // account for them here. 
let estimate_mem_bytes = count_in_mem * InMemAccountsIndex::::approx_size_of_one_entry() - + held_in_mem_slot_list_len as usize * size_of::<(Slot, T)>(); + + (held_in_mem_ref_count + held_in_mem_slot_list_len) as usize + * size_of::<(Slot, T)>() // <-- size of one slot list entry + * 2; // <-- and assume there are two entries datapoint_info!( if startup || was_startup { thread_time_elapsed_ms *= 2; // more threads are allocated during startup @@ -257,6 +283,7 @@ impl BucketMapHolderStats { i64 ), ("count_in_mem", count_in_mem, i64), + ("capacity_in_mem", capacity_in_mem, i64), ("count", self.total_count(), i64), ( "bg_waiting_percent", @@ -275,11 +302,7 @@ impl BucketMapHolderStats { f64 ), ("slot_list_len", held_in_mem_slot_list_len, i64), - ( - "ref_count", - self.held_in_mem.ref_count.swap(0, Ordering::Relaxed), - i64 - ), + ("ref_count", held_in_mem_ref_count, i64), ( "slot_list_cached", self.held_in_mem.slot_list_cached.swap(0, Ordering::Relaxed), @@ -542,11 +565,8 @@ impl BucketMapHolderStats { * InMemAccountsIndex::::approx_size_of_one_entry(), i64 ), - ( - "count_in_mem", - self.count_in_mem.load(Ordering::Relaxed), - i64 - ), + ("count_in_mem", count_in_mem, i64), + ("capacity_in_mem", capacity_in_mem, i64), ("count", self.total_count(), i64), ( "bg_waiting_percent", diff --git a/accounts-db/src/buffered_reader.rs b/accounts-db/src/buffered_reader.rs index 8f97ce87ce7831..5c8d6830b7f717 100644 --- a/accounts-db/src/buffered_reader.rs +++ b/accounts-db/src/buffered_reader.rs @@ -15,7 +15,7 @@ use { crate::file_io::{read_into_buffer, read_more_buffer}, std::{ fs::File, - io::{self, BufRead, BufReader}, + io::{self, BufRead}, mem::MaybeUninit, ops::Range, path::Path, @@ -67,7 +67,15 @@ impl Backing for Stack { /// An extension of the `BufRead` trait for file readers that allow tracking file /// read position offset. 
-pub(crate) trait FileBufRead: BufRead { +pub(crate) trait FileBufRead<'a>: BufRead { + /// Activate the given `file` as source of reads of this reader. + /// + /// Resets the internal buffer to an empty state and sets the file offset to 0. + /// + /// `read_limit` provides a pre-defined limit on the number of bytes that can be read + /// from the file (unless EOF is reached). + fn set_file(&mut self, file: &'a File, read_limit: usize) -> io::Result<()>; + /// Returns the current file offset corresponding to the start of the buffer /// that will be returned by the next call to `fill_buf`. /// @@ -97,6 +105,9 @@ pub(crate) trait RequiredLenBufRead: BufRead { fn fill_buf_required(&mut self, required_len: usize) -> io::Result<&[u8]>; } +pub(crate) trait RequiredLenBufFileRead<'a>: RequiredLenBufRead + FileBufRead<'a> {} +impl<'a, T: RequiredLenBufRead + FileBufRead<'a>> RequiredLenBufFileRead<'a> for T {} + /// read a file a large buffer at a time and provide access to a slice in that buffer pub struct BufferedReader<'a, T> { /// when we are next asked to read from file, start at this offset @@ -110,26 +121,41 @@ pub struct BufferedReader<'a, T> { /// how many bytes are valid in the file. The file's len may be longer. 
file_len_valid: usize, /// reference to file handle - file: &'a File, + file: Option<&'a File>, } -impl<'a, T> BufferedReader<'a, T> { - /// `buffer_size`: how much to try to read at a time - /// `file_len_valid`: # bytes that are valid in the file, may be less than overall file len - /// `default_min_read_requirement`: make sure we always have this much data available if we're asked to read - pub fn new(backing: T, file_len_valid: usize, file: &'a File) -> Self { +impl<'a, T: Backing> BufferedReader<'a, T> { + pub fn new(backing: T) -> Self { Self { file_offset_of_next_read: 0, buf: backing, buf_valid_bytes: 0..0, file_last_offset: 0, - file_len_valid, - file, + file_len_valid: 0, + file: None, } } + + pub fn with_file(mut self, file: &'a File, read_limit: usize) -> Self { + self.do_set_file(file, read_limit); + self + } + + fn do_set_file(&mut self, file: &'a File, read_limit: usize) { + self.file = Some(file); + self.file_len_valid = read_limit; + self.file_last_offset = 0; + self.file_offset_of_next_read = 0; + self.buf_valid_bytes = 0..0; + } } -impl FileBufRead for BufferedReader<'_, T> { +impl<'a, T: Backing> FileBufRead<'a> for BufferedReader<'a, T> { + fn set_file(&mut self, file: &'a File, read_limit: usize) -> io::Result<()> { + self.do_set_file(file, read_limit); + Ok(()) + } + #[inline(always)] fn get_file_offset(&self) -> usize { if self.buf_valid_bytes.is_empty() { @@ -150,8 +176,11 @@ where // we haven't used all the bytes we read last time, so adjust the effective offset debug_assert!(self.buf_valid_bytes.len() <= self.file_offset_of_next_read); self.file_last_offset = self.file_offset_of_next_read - self.buf_valid_bytes.len(); + let Some(file) = &self.file else { + return Err(io::Error::new(io::ErrorKind::BrokenPipe, "no open file")); + }; read_more_buffer( - self.file, + file, self.file_len_valid, &mut self.file_offset_of_next_read, // SAFETY: `read_more_buffer` will only _write_ to uninitialized memory and lifetime is tied to self. 
@@ -167,10 +196,10 @@ where } } -impl<'a, const N: usize> BufferedReader<'a, Stack> { +impl BufferedReader<'_, Stack> { /// create a new buffered reader with a stack-allocated buffer - pub fn new_stack(file_len_valid: usize, file: &'a File) -> Self { - BufferedReader::new(Stack::new(), file_len_valid, file) + pub fn new_stack() -> Self { + BufferedReader::new(Stack::new()) } } @@ -191,8 +220,11 @@ impl io::Read for BufferedReader<'_, T> { } // Read directly from file into space still left in the buf. + let Some(file) = &self.file else { + return Err(io::Error::new(io::ErrorKind::BrokenPipe, "no open file")); + }; let bytes_read = read_into_buffer( - self.file, + file, self.file_len_valid, self.file_offset_of_next_read, buf, @@ -317,7 +349,12 @@ impl BufRead for BufReaderWithOverflow { } } -impl FileBufRead for BufReaderWithOverflow { +impl<'a, R: FileBufRead<'a>> FileBufRead<'a> for BufReaderWithOverflow { + fn set_file(&mut self, file: &'a File, read_limit: usize) -> io::Result<()> { + self.overflow_buf.clear(); + self.reader.set_file(file, read_limit) + } + fn get_file_offset(&self) -> usize { self.reader.get_file_offset() - self.overflow_buf.len() } @@ -366,21 +403,22 @@ impl RequiredLenBufRead for BufReaderWithOverflow { } /// Open file at `path` with buffering reader using `buf_size` memory and doing -/// read-ahead IO reads (if `io_uring` is supported by the host) -pub fn large_file_buf_reader( - path: impl AsRef, - buf_size: usize, -) -> io::Result> { +/// read-ahead IO reads (if `io_uring` is supported by the platform) +pub fn large_file_buf_reader(path: &Path, buf_size: usize) -> io::Result { #[cfg(target_os = "linux")] - if agave_io_uring::io_uring_supported() { - use crate::io_uring::sequential_file_reader::SequentialFileReader; + { + assert!(agave_io_uring::io_uring_supported()); + use crate::io_uring::sequential_file_reader::{SequentialFileReader, DEFAULT_READ_SIZE}; - return Ok(Box::new(SequentialFileReader::with_capacity( - buf_size, path, - )?)); + 
let buf_size = buf_size.max(DEFAULT_READ_SIZE); + SequentialFileReader::with_capacity(buf_size, path) + } + #[cfg(not(target_os = "linux"))] + { + use std::io::BufReader; + let file = File::open(path)?; + Ok(BufReader::with_capacity(buf_size, file)) } - let file = File::open(path)?; - Ok(Box::new(BufReader::with_capacity(buf_size, file))) } #[cfg(test)] @@ -411,7 +449,7 @@ mod tests { // First read 16 bytes to fill buffer let file_len_valid = 32; let default_min_read = 8; - let mut reader = BufferedReader::new(backing, file_len_valid, &sample_file); + let mut reader = BufferedReader::new(backing).with_file(&sample_file, file_len_valid); let offset = reader.get_file_offset(); let slice = ValidSlice::new(reader.fill_buf_required(default_min_read).unwrap()); let mut expected_offset = 0; @@ -472,7 +510,7 @@ mod tests { // First read 16 bytes to fill buffer let default_min_read_size = 8; - let mut reader = BufferedReader::new(backing, valid_len, &sample_file); + let mut reader = BufferedReader::new(backing).with_file(&sample_file, valid_len); let offset = reader.get_file_offset(); let slice = ValidSlice::new(reader.fill_buf_required(default_min_read_size).unwrap()); let mut expected_offset = 0; @@ -552,7 +590,7 @@ mod tests { // First read 16 bytes to fill buffer let file_len_valid = 32; let default_min_read_size = 8; - let mut reader = BufferedReader::new(backing, file_len_valid, &sample_file); + let mut reader = BufferedReader::new(backing).with_file(&sample_file, file_len_valid); let offset = reader.get_file_offset(); let slice = ValidSlice::new(reader.fill_buf_required(default_min_read_size).unwrap()); let mut expected_offset = 0; @@ -625,7 +663,7 @@ mod tests { // First read 16 bytes to fill buffer let valid_len = 32; let default_min_read = 8; - let mut reader = BufferedReader::new(backing, valid_len, &sample_file); + let mut reader = BufferedReader::new(backing).with_file(&sample_file, valid_len); let offset = reader.get_file_offset(); let slice = 
ValidSlice::new(reader.fill_buf_required(default_min_read).unwrap()); let mut expected_offset = 0; @@ -671,9 +709,8 @@ mod tests { let bytes = rand_bytes::(); sample_file.write_all(&bytes).unwrap(); - let file_len_valid = 32; let mut reader = BufReaderWithOverflow::new( - BufferedReader::new(backing, file_len_valid, &sample_file), + BufferedReader::new(backing).with_file(&sample_file, FILE_SIZE), 0, usize::MAX, ); @@ -719,7 +756,7 @@ mod tests { sample_file.write_all(&bytes).unwrap(); let mut reader = BufReaderWithOverflow::new( - BufferedReader::new(backing, FILE_SIZE, &sample_file), + BufferedReader::new(backing).with_file(&sample_file, FILE_SIZE), 0, 32, ); diff --git a/accounts-db/src/file_io.rs b/accounts-db/src/file_io.rs index 5bc15a57180983..27dcfea31ffcbb 100644 --- a/accounts-db/src/file_io.rs +++ b/accounts-db/src/file_io.rs @@ -49,6 +49,35 @@ fn arch_read_at(file: &File, buffer: &mut [u8], offset: u64) -> std::io::Result< file.seek_read(buffer, offset) } +#[cfg(unix)] +fn arch_write_at(file: &File, buffer: &[u8], offset: u64) -> io::Result { + use std::os::unix::prelude::FileExt; + file.write_at(buffer, offset) +} + +#[cfg(windows)] +fn arch_write_at(file: &File, buffer: &[u8], offset: u64) -> io::Result { + use std::os::windows::fs::FileExt; + // Note: as opposed to unix `write_at` this call will update the internal file offset, + // so all callers should consistently use the file only through this module + file.seek_write(buffer, offset) +} + +/// Write, starting at `offset`, the whole buffer to a file irrespective of the file's current length. +/// +/// After this operation file size may be extended and the file cursor may be moved (platform-dependent). 
+pub fn write_buffer_to_file(file: &File, mut buffer: &[u8], mut offset: u64) -> io::Result<()> { + while !buffer.is_empty() { + let wrote_len = arch_write_at(file, buffer, offset)?; + if wrote_len == 0 { + return Err(io::ErrorKind::WriteZero.into()); + } + buffer = &buffer[wrote_len..]; + offset += wrote_len as u64; + } + Ok(()) +} + /// Read, starting at `start_offset`, until `buffer` is full or we read past `valid_file_len`/eof. /// `valid_file_len` is # of valid bytes in the file. This may be <= file length. /// return # bytes read @@ -116,10 +145,13 @@ pub fn file_creator<'a>( ) -> io::Result> { #[cfg(target_os = "linux")] if agave_io_uring::io_uring_supported() { - use crate::io_uring::file_creator::IoUringFileCreator; + use crate::io_uring::file_creator::{IoUringFileCreator, DEFAULT_WRITE_SIZE}; - let io_uring_creator = IoUringFileCreator::with_buffer_capacity(buf_size, file_complete)?; - return Ok(Box::new(io_uring_creator)); + if buf_size >= DEFAULT_WRITE_SIZE { + let io_uring_creator = + IoUringFileCreator::with_buffer_capacity(buf_size, file_complete)?; + return Ok(Box::new(io_uring_creator)); + } } Ok(Box::new(SyncIoFileCreator::new(buf_size, file_complete))) } @@ -178,6 +210,20 @@ impl FileCreator for SyncIoFileCreator<'_> { } } +pub fn validate_memlock_limit_for_disk_io(required_size: usize) -> io::Result<()> { + #[cfg(target_os = "linux")] + { + // memory locked requirement is only necessary on linux where io_uring is used + use crate::io_uring::memory::adjust_ulimit_memlock; + adjust_ulimit_memlock(required_size) + } + #[cfg(not(target_os = "linux"))] + { + let _ = required_size; + Ok(()) + } +} + #[cfg(test)] mod tests { use { diff --git a/accounts-db/src/hardened_unpack.rs b/accounts-db/src/hardened_unpack.rs index 5b5c72aaf6c40f..6a9b84e75a9d6f 100644 --- a/accounts-db/src/hardened_unpack.rs +++ b/accounts-db/src/hardened_unpack.rs @@ -53,8 +53,6 @@ const MAX_GENESIS_ARCHIVE_UNPACKED_COUNT: u64 = 100; // - Large files: their data may accumulate 
in backlog buffers while waiting for file open // operations to complete. const MAX_UNPACK_WRITE_BUF_SIZE: usize = 512 * 1024 * 1024; -// Minimum for unpacking small archives - allows ~2-4 write-capacity-sized operations concurrently. -const MIN_UNPACK_WRITE_BUF_SIZE: usize = 2 * 1024 * 1024; fn checked_total_size_sum(total_size: u64, entry_size: u64, limit_size: u64) -> Result { trace!("checked_total_size_sum: {total_size} + {entry_size} < {limit_size}"); @@ -95,6 +93,7 @@ pub enum UnpackPath<'a> { fn unpack_archive<'a, A, C, D>( mut archive: Archive, + memlock_budget_size: usize, apparent_limit_size: u64, actual_limit_size: u64, limit_count: u64, @@ -113,11 +112,10 @@ where let mut total_entries = 0; let mut open_dirs = Vec::new(); - // Bound the buffer based on provided limit of unpacked data (buffering a fraction, - // e.g. 25%, of absolute maximum won't be necessary) - this works well for genesis, - // while normal case hit the UNPACK_WRITE_BUF_SIZE tuned for it prod snapshot archive. - let buf_size = (apparent_limit_size.div_ceil(4) as usize) - .clamp(MIN_UNPACK_WRITE_BUF_SIZE, MAX_UNPACK_WRITE_BUF_SIZE); + // Bound the buffer based on provided limit of unpacked data and input archive size + // (decompression multiplies content size, but buffering more than origin isn't necessary). + let buf_size = + (memlock_budget_size.min(actual_limit_size as usize)).min(MAX_UNPACK_WRITE_BUF_SIZE); let mut files_creator = file_creator(buf_size, file_path_processor)?; for entry in archive.entries()? 
{ @@ -344,12 +342,14 @@ pub type UnpackedAppendVecMap = HashMap; /// Unpacks snapshot and collects AppendVec file names & paths pub fn unpack_snapshot( archive: Archive, + memlock_budget_size: usize, ledger_dir: &Path, account_paths: &[PathBuf], ) -> Result { let mut unpacked_append_vec_map = UnpackedAppendVecMap::new(); unpack_snapshot_with_processors( archive, + memlock_budget_size, ledger_dir, account_paths, |file, path| { @@ -364,12 +364,14 @@ pub fn unpack_snapshot( /// sends entry file paths through the `sender` channel pub fn streaming_unpack_snapshot( archive: Archive, + memlock_budget_size: usize, ledger_dir: &Path, account_paths: &[PathBuf], sender: &Sender, ) -> Result<()> { unpack_snapshot_with_processors( archive, + memlock_budget_size, ledger_dir, account_paths, |_, _| {}, @@ -387,6 +389,7 @@ pub fn streaming_unpack_snapshot( fn unpack_snapshot_with_processors( archive: Archive, + memlock_budget_size: usize, ledger_dir: &Path, account_paths: &[PathBuf], mut accounts_path_processor: F, @@ -401,6 +404,7 @@ where unpack_archive( archive, + memlock_budget_size, MAX_SNAPSHOT_ARCHIVE_UNPACKED_APPARENT_SIZE, MAX_SNAPSHOT_ARCHIVE_UNPACKED_ACTUAL_SIZE, MAX_SNAPSHOT_ARCHIVE_UNPACKED_COUNT, @@ -538,6 +542,7 @@ fn unpack_genesis( ) -> Result<()> { unpack_archive( archive, + 0, /* don't provide memlock budget (forces sync IO), since genesis archives are small */ max_genesis_archive_unpacked_size, max_genesis_archive_unpacked_size, MAX_GENESIS_ARCHIVE_UNPACKED_COUNT, @@ -816,7 +821,8 @@ mod tests { fn finalize_and_unpack_snapshot(archive: tar::Builder>) -> Result<()> { with_finalize_and_unpack(archive, |a, b| { - unpack_snapshot_with_processors(a, b, &[PathBuf::new()], |_, _| {}, |_| {}).map(|_| ()) + unpack_snapshot_with_processors(a, 256, b, &[PathBuf::new()], |_, _| {}, |_| {}) + .map(|_| ()) }) } @@ -1072,6 +1078,7 @@ mod tests { let result = with_finalize_and_unpack(archive, |ar, tmp| { unpack_snapshot_with_processors( ar, + 256, tmp, 
&[tmp.join("accounts_dest")], |_, _| {}, diff --git a/accounts-db/src/io_uring/file_creator.rs b/accounts-db/src/io_uring/file_creator.rs index 4a0d10926767dc..ab0017d1f55207 100644 --- a/accounts-db/src/io_uring/file_creator.rs +++ b/accounts-db/src/io_uring/file_creator.rs @@ -28,7 +28,7 @@ use { // Based on transfers seen with `dd bs=SIZE` for NVME drives: values >=64KiB are fine, // but usually peak around 256KiB-1MiB. Also compare with particular NVME parameters, e.g. // 32 pages (Maximum Data Transfer Size) * page size (MPSMIN = Memory Page Size) = 128KiB. -const DEFAULT_WRITE_SIZE: usize = 512 * 1024; +pub const DEFAULT_WRITE_SIZE: usize = 512 * 1024; // 99.9% of accounts storage files are < 8MiB type BacklogVec = SmallVec<[PendingWrite; 8 * 1024 * 1024 / DEFAULT_WRITE_SIZE]>; @@ -154,7 +154,7 @@ impl FileCreator for IoUringFileCreator<'_, B> { } impl IoUringFileCreator<'_, B> { - /// Schedule opening file at `path` with `mode` permissons. + /// Schedule opening file at `path` with `mode` permissions. /// /// Returns key that can be used for scheduling writes for it. fn open( @@ -336,8 +336,8 @@ impl FileCreatorStats { .checked_div(self.no_buf_count as usize) .unwrap_or_default(); log::info!( - "files creation stats - large buf headroom: {}, no buf count: {},\ - avg pending writes at no buf: {avg_writes_at_no_buf}", + "files creation stats - large buf headroom: {}, no buf count: {}, avg pending writes \ + at no buf: {avg_writes_at_no_buf}", self.large_buf_headroom_count, self.no_buf_count, ); diff --git a/accounts-db/src/io_uring/memory.rs b/accounts-db/src/io_uring/memory.rs index a218593f76a45a..ecaac6003bd13e 100644 --- a/accounts-db/src/io_uring/memory.rs +++ b/accounts-db/src/io_uring/memory.rs @@ -49,7 +49,7 @@ impl AsMut<[u8]> for LargeBuffer { } impl LargeBuffer { - /// Allocare memory buffer optimized for io_uring operations, i.e. + /// Allocate memory buffer optimized for io_uring operations, i.e. 
/// using HugeTable when it is available on the host. pub fn new(size: usize) -> Self { if size > PageAlignedMemory::page_size() { @@ -198,12 +198,11 @@ impl FixedIoBuffer { } } - /// Registed provided buffer as fixed buffer in `io_uring`. + /// Register provided buffer as fixed buffer in `io_uring`. pub unsafe fn register>( buffer: &mut [u8], ring: &Ring, ) -> io::Result<()> { - adjust_ulimit_memlock(buffer.len())?; let iovecs = buffer .chunks(FIXED_BUFFER_LEN) .map(|buf| libc::iovec { @@ -221,11 +220,10 @@ impl AsRef<[u8]> for FixedIoBuffer { } } +/// Check kernel memory lock limit and increase it if necessary. +/// +/// Returns `Err` when current limit is below `min_required` and cannot be increased. pub fn adjust_ulimit_memlock(min_required: usize) -> io::Result<()> { - // This value reflects recommended memory lock limit documented in the validator's - // setup instructions at docs/src/operations/guides/validator-start.md - const DESIRED_MEMLOCK: u64 = 2_000_000_000; - fn get_memlock() -> libc::rlimit { let mut memlock = libc::rlimit { rlim_cur: 0, @@ -240,18 +238,17 @@ pub fn adjust_ulimit_memlock(min_required: usize) -> io::Result<()> { let mut memlock = get_memlock(); let current = memlock.rlim_cur as usize; if current < min_required { - memlock.rlim_cur = DESIRED_MEMLOCK; - memlock.rlim_max = DESIRED_MEMLOCK; + memlock.rlim_cur = min_required as u64; + memlock.rlim_max = min_required as u64; if unsafe { libc::setrlimit(libc::RLIMIT_MEMLOCK, &memlock) } != 0 { log::error!( - "Unable to increase the maximum memory lock limit to {} from {current}", - memlock.rlim_cur + "Unable to increase the maximum memory lock limit to {min_required} from {current}" ); if cfg!(target_os = "macos") { log::error!( - "On mac OS you may need to run \ - |sudo launchctl limit memlock {DESIRED_MEMLOCK} {DESIRED_MEMLOCK}| first" + "On mac OS you may need to run |sudo launchctl limit memlock {min_required} \ + {min_required}| first" ); } return Err(io::Error::new( diff --git 
a/accounts-db/src/io_uring/sequential_file_reader.rs b/accounts-db/src/io_uring/sequential_file_reader.rs index eaf6b64edf17a7..9a0d5d242a8b9b 100644 --- a/accounts-db/src/io_uring/sequential_file_reader.rs +++ b/accounts-db/src/io_uring/sequential_file_reader.rs @@ -20,7 +20,7 @@ use { // Based on transfers seen with `dd bs=SIZE` for NVME drives: values >=64KiB are fine, // but peak at 1MiB. Also compare with particular NVME parameters, e.g. // 32 pages (Maximum Data Transfer Size) * page size (MPSMIN = Memory Page Size) = 128KiB. -const DEFAULT_READ_SIZE: usize = 1024 * 1024; +pub const DEFAULT_READ_SIZE: usize = 1024 * 1024; const SQPOLL_IDLE_TIMEOUT: u32 = 50; // For large file we don't really use workers as few regularly submitted requests get handled // within sqpoll thread. Allow some workers just in case, but limit them. @@ -88,10 +88,8 @@ impl> SequentialFileReader { ) -> io::Result { let buffer = backing_buffer.as_mut(); assert!(buffer.len() >= read_capacity, "buffer too small"); - assert!( - buffer.len() % read_capacity == 0, - "buffer size must be a multiple of read_capacity" - ); + let read_aligned_buf_len = buffer.len() / read_capacity * read_capacity; + let buffer = &mut buffer[..read_aligned_buf_len]; let file = OpenOptions::new() .read(true) @@ -423,7 +421,7 @@ mod tests { // Verify the contents for (i, byte) in all_read_data.iter().enumerate() { - assert_eq!(*byte, pattern[i % pattern.len()], "Mismatch - pos {}", i); + assert_eq!(*byte, pattern[i % pattern.len()], "Mismatch - pos {i}"); } } diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index b88c75990b5768..8d92686b537b60 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -42,10 +42,9 @@ pub mod stake_rewards; pub mod storable_accounts; pub mod tiered_storage; pub mod utils; -mod verify_accounts_hash_in_background; pub mod waitable_condvar; -pub use buffered_reader::large_file_buf_reader; +pub use {buffered_reader::large_file_buf_reader, 
file_io::validate_memlock_limit_for_disk_io}; #[macro_use] extern crate solana_metrics; diff --git a/accounts-db/src/read_only_accounts_cache.rs b/accounts-db/src/read_only_accounts_cache.rs index be56a0058c9f35..61d4916bd6851a 100644 --- a/accounts-db/src/read_only_accounts_cache.rs +++ b/accounts-db/src/read_only_accounts_cache.rs @@ -27,7 +27,7 @@ use { #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const CACHE_ENTRY_SIZE: usize = - std::mem::size_of::() + 2 * std::mem::size_of::(); + size_of::() + size_of::(); type ReadOnlyCacheKey = Pubkey; diff --git a/accounts-db/src/sorted_storages.rs b/accounts-db/src/sorted_storages.rs index 9d0ea561f22db8..104f5032b172d2 100644 --- a/accounts-db/src/sorted_storages.rs +++ b/accounts-db/src/sorted_storages.rs @@ -196,7 +196,7 @@ mod tests { super::*, crate::{ accounts_db::{AccountStorageEntry, AccountsFileId}, - accounts_file::{AccountsFile, AccountsFileProvider}, + accounts_file::{AccountsFile, AccountsFileProvider, StorageAccess}, append_vec::AppendVec, }, std::sync::Arc, @@ -448,8 +448,14 @@ mod tests { id, size as u64, AccountsFileProvider::AppendVec, + StorageAccess::File, ); - let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, 1024 * 1024)); + let av = AccountsFile::AppendVec(AppendVec::new( + &tf.path, + true, + 1024 * 1024, + StorageAccess::File, + )); data.accounts = av; Arc::new(data) diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index f41a9c3accf2b5..ea624419f82c55 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -708,6 +708,7 @@ pub mod tests { id, file_size, AccountsFileProvider::AppendVec, + db.storage_access(), ); let storage = Arc::new(data); db.storage.insert(slot, storage.clone()); diff --git a/accounts-db/src/tiered_storage/byte_block.rs b/accounts-db/src/tiered_storage/byte_block.rs index 7436ae7b9710c1..5a2a977caf54d6 100644 --- a/accounts-db/src/tiered_storage/byte_block.rs +++ 
b/accounts-db/src/tiered_storage/byte_block.rs @@ -391,7 +391,7 @@ mod tests { } #[test] - fn test_write_optionl_fields_raw_format() { + fn test_write_optional_fields_raw_format() { write_optional_fields(AccountBlockFormat::AlignedRaw); } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 8dc168434555e5..7a4a38b3818549 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -4,7 +4,7 @@ use { crate::{ account_info::{AccountInfo, Offset}, account_storage::stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData}, - accounts_file::{MatchAccountOwnerError, StoredAccountsInfo}, + accounts_file::StoredAccountsInfo, tiered_storage::{ byte_block, file::{TieredReadableFile, TieredWritableFile}, @@ -24,10 +24,19 @@ use { solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, solana_clock::Epoch, solana_pubkey::Pubkey, - solana_rent_collector::RENT_EXEMPT_RENT_EPOCH, std::{io::Write, option::Option, path::Path}, }; +/// When rent is collected from an exempt account, rent_epoch is set to this +/// value. The idea is to have a fixed, consistent value for rent_epoch for all accounts that do not collect rent. +/// This enables us to get rid of the field completely. +pub const RENT_EXEMPT_RENT_EPOCH: Epoch = Epoch::MAX; +#[cfg(test)] +static_assertions::const_assert_eq!( + RENT_EXEMPT_RENT_EPOCH, + solana_svm::rent_calculator::RENT_EXEMPT_RENT_EPOCH +); + pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat { meta_entry_size: std::mem::size_of::(), account_meta_format: AccountMetaFormat::Hot, @@ -440,38 +449,6 @@ impl HotStorageReader { .get_owner_address(&self.mmap, &self.footer, owner_offset) } - /// Returns Ok(index_of_matching_owner) if the account owner at - /// `account_offset` is one of the pubkeys in `owners`. 
- /// - /// Returns Err(MatchAccountOwnerError::NoMatch) if the account has 0 - /// lamports or the owner is not one of the pubkeys in `owners`. - /// - /// Returns Err(MatchAccountOwnerError::UnableToLoad) if there is any internal - /// error that causes the data unable to load, including `account_offset` - /// causes a data overrun. - pub fn account_matches_owners( - &self, - account_offset: HotAccountOffset, - owners: &[Pubkey], - ) -> Result { - let account_meta = self - .get_account_meta_from_offset(account_offset) - .map_err(|_| MatchAccountOwnerError::UnableToLoad)?; - - if account_meta.lamports() == 0 { - Err(MatchAccountOwnerError::NoMatch) - } else { - let account_owner = self - .get_owner_address(account_meta.owner_offset()) - .map_err(|_| MatchAccountOwnerError::UnableToLoad)?; - - owners - .iter() - .position(|candidate| account_owner == candidate) - .ok_or(MatchAccountOwnerError::NoMatch) - } - } - /// Returns the size of the account block based on its account offset /// and index offset. /// @@ -620,7 +597,7 @@ impl HotStorageReader { /// Calculate the amount of storage required for an account with the passed /// in data_len - pub(crate) fn calculate_stored_size(&self, data_len: usize) -> usize { + pub(crate) fn calculate_stored_size(data_len: usize) -> usize { stored_size(data_len) } @@ -870,7 +847,7 @@ mod tests { }, assert_matches::assert_matches, memoffset::offset_of, - rand::{seq::SliceRandom, Rng}, + rand::Rng, solana_account::ReadableAccount, solana_clock::{Epoch, Slot}, solana_hash::Hash, @@ -1226,12 +1203,12 @@ mod tests { #[test] #[should_panic(expected = "would exceed accounts blocks offset boundary")] - fn test_get_acount_meta_from_offset_out_of_bounds() { + fn test_get_account_meta_from_offset_out_of_bounds() { // Generate a new temp path that is guaranteed to NOT already have a file. 
let temp_dir = TempDir::new().unwrap(); let path = temp_dir .path() - .join("test_get_acount_meta_from_offset_out_of_bounds"); + .join("test_get_account_meta_from_offset_out_of_bounds"); let footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, @@ -1358,115 +1335,6 @@ mod tests { } } - #[test] - fn test_account_matches_owners() { - // Generate a new temp path that is guaranteed to NOT already have a file. - let temp_dir = TempDir::new().unwrap(); - let path = temp_dir.path().join("test_hot_storage_get_owner_address"); - const NUM_OWNERS: u32 = 10; - - let owner_addresses: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) - .take(NUM_OWNERS as usize) - .collect(); - - const NUM_ACCOUNTS: u32 = 30; - let mut rng = rand::thread_rng(); - - let hot_account_metas: Vec<_> = std::iter::repeat_with({ - || { - HotAccountMeta::new() - .with_lamports(rng.gen_range(1..u64::MAX)) - .with_owner_offset(OwnerOffset(rng.gen_range(0..NUM_OWNERS))) - } - }) - .take(NUM_ACCOUNTS as usize) - .collect(); - let mut footer = TieredStorageFooter { - account_meta_format: AccountMetaFormat::Hot, - account_entry_count: NUM_ACCOUNTS, - owner_count: NUM_OWNERS, - ..TieredStorageFooter::default() - }; - let account_offsets: Vec<_>; - - { - let mut file = TieredWritableFile::new(&path).unwrap(); - let mut current_offset = 0; - - account_offsets = hot_account_metas - .iter() - .map(|meta| { - let prev_offset = current_offset; - current_offset += file.write_pod(meta).unwrap(); - HotAccountOffset::new(prev_offset).unwrap() - }) - .collect(); - footer.index_block_offset = current_offset as u64; - // Typically, the owners block is stored after index block, but - // since we don't write index block in this test, so we have - // the owners_block_offset set to the end of the accounts blocks. 
- footer.owners_block_offset = footer.index_block_offset; - - let mut owners_table = OwnersTable::default(); - owner_addresses.iter().for_each(|owner_address| { - owners_table.insert(owner_address); - }); - footer - .owners_block_format - .write_owners_block(&mut file, &owners_table) - .unwrap(); - - // while the test only focuses on account metas, writing a footer - // here is necessary to make it a valid tiered-storage file. - footer.write_footer_block(&mut file).unwrap(); - } - - let file = TieredReadableFile::new(&path).unwrap(); - let hot_storage = HotStorageReader::new(file).unwrap(); - - // First, verify whether we can find the expected owners. - let mut owner_candidates = owner_addresses.clone(); - owner_candidates.shuffle(&mut rng); - - for (account_offset, account_meta) in account_offsets.iter().zip(hot_account_metas.iter()) { - let index = hot_storage - .account_matches_owners(*account_offset, &owner_candidates) - .unwrap(); - assert_eq!( - owner_candidates[index], - owner_addresses[account_meta.owner_offset().0 as usize] - ); - } - - // Second, verify the MatchAccountOwnerError::NoMatch case - const NUM_UNMATCHED_OWNERS: usize = 20; - let unmatched_candidates: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) - .take(NUM_UNMATCHED_OWNERS) - .collect(); - - for account_offset in account_offsets.iter() { - assert_eq!( - hot_storage.account_matches_owners(*account_offset, &unmatched_candidates), - Err(MatchAccountOwnerError::NoMatch) - ); - } - - // Thirdly, we mixed two candidates and make sure we still find the - // matched owner. 
- owner_candidates.extend(unmatched_candidates); - owner_candidates.shuffle(&mut rng); - - for (account_offset, account_meta) in account_offsets.iter().zip(hot_account_metas.iter()) { - let index = hot_storage - .account_matches_owners(*account_offset, &owner_candidates) - .unwrap(); - assert_eq!( - owner_candidates[index], - owner_addresses[account_meta.owner_offset().0 as usize] - ); - } - } - #[test] fn test_get_stored_account_without_data_callback() { const NUM_ACCOUNTS: usize = 20; diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index 66ce155fb52c1f..951bd915413e1a 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ b/accounts-db/src/tiered_storage/meta.rs @@ -27,7 +27,7 @@ const _: () = assert!(std::mem::size_of::() == 4); /// A trait that allows different implementations of the account meta that /// support different tiers of the accounts storage. pub trait TieredAccountMeta: Sized { - /// Constructs a TieredAcountMeta instance. + /// Constructs a TieredAccountMeta instance. fn new() -> Self; /// A builder function that initializes lamports. diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 7b2aeb9cab094f..5da33c5515a35e 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -2,7 +2,6 @@ use { crate::{ account_info::Offset, account_storage::stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData}, - accounts_file::MatchAccountOwnerError, tiered_storage::{ file::TieredReadableFile, footer::{AccountMetaFormat, TieredStorageFooter}, @@ -110,30 +109,6 @@ impl TieredStorageReader { } } - /// Returns Ok(index_of_matching_owner) if the account owner at - /// `account_offset` is one of the pubkeys in `owners`. - /// - /// Returns Err(MatchAccountOwnerError::NoMatch) if the account has 0 - /// lamports or the owner is not one of the pubkeys in `owners`. 
- /// - /// Returns Err(MatchAccountOwnerError::UnableToLoad) if there is any internal - /// error that causes the data unable to load, including `account_offset` - /// causes a data overrun. - pub fn account_matches_owners( - &self, - index_offset: IndexOffset, - owners: &[Pubkey], - ) -> Result { - match self { - Self::Hot(hot) => { - let account_offset = hot - .get_account_offset(index_offset) - .map_err(|_| MatchAccountOwnerError::UnableToLoad)?; - hot.account_matches_owners(account_offset, owners) - } - } - } - /// iterate over all pubkeys pub fn scan_pubkeys(&self, callback: impl FnMut(&Pubkey)) -> TieredStorageResult<()> { match self { @@ -178,7 +153,7 @@ impl TieredStorageReader { /// in data_len pub(crate) fn calculate_stored_size(&self, data_len: usize) -> usize { match self { - Self::Hot(hot) => hot.calculate_stored_size(data_len), + Self::Hot(_) => HotStorageReader::calculate_stored_size(data_len), } } diff --git a/accounts-db/src/tiered_storage/test_utils.rs b/accounts-db/src/tiered_storage/test_utils.rs index 67a44eeaefcf50..41df055264f8fe 100644 --- a/accounts-db/src/tiered_storage/test_utils.rs +++ b/accounts-db/src/tiered_storage/test_utils.rs @@ -2,10 +2,12 @@ //! Helper functions for TieredStorage tests use { super::footer::TieredStorageFooter, - crate::{account_storage::stored_account_info::StoredAccountInfo, append_vec::StoredMeta}, + crate::{ + account_storage::stored_account_info::StoredAccountInfo, append_vec::StoredMeta, + tiered_storage::hot::RENT_EXEMPT_RENT_EPOCH, + }, solana_account::{Account, AccountSharedData, ReadableAccount}, solana_pubkey::Pubkey, - solana_rent_collector::RENT_EXEMPT_RENT_EPOCH, }; /// Create a test account based on the specified seed. 
diff --git a/accounts-db/src/utils.rs b/accounts-db/src/utils.rs index 41231c7372a6c9..9582031a79b05e 100644 --- a/accounts-db/src/utils.rs +++ b/accounts-db/src/utils.rs @@ -138,7 +138,8 @@ pub fn move_and_async_delete_path(path: impl AsRef) { /// Removes a directory and all its contents. pub fn remove_dir_all(path: impl Into + AsRef) -> io::Result<()> { #[cfg(target_os = "linux")] - if io_uring_supported() { + { + assert!(io_uring_supported()); if let Ok(mut remover) = RingDirRemover::new() { return remover.remove_dir_all(path); } @@ -152,7 +153,8 @@ pub fn remove_dir_contents(path: impl AsRef) { let path = path.as_ref(); #[cfg(target_os = "linux")] - if io_uring_supported() { + { + assert!(io_uring_supported()); if let Ok(mut remover) = RingDirRemover::new() { if let Err(e) = remover.remove_dir_contents(path) { warn!("Failed to delete contents of '{}': {e}", path.display()); diff --git a/accounts-db/src/verify_accounts_hash_in_background.rs b/accounts-db/src/verify_accounts_hash_in_background.rs deleted file mode 100644 index e3ebc25d3744f0..00000000000000 --- a/accounts-db/src/verify_accounts_hash_in_background.rs +++ /dev/null @@ -1,154 +0,0 @@ -//! 
at startup, verify accounts hash in the background -use std::{ - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, Mutex, - }, - thread::JoinHandle, -}; - -#[derive(Debug)] -pub struct VerifyAccountsHashInBackground { - /// true when verification has completed or never had to run in background - pub verified: Arc, - /// thread doing verification - thread: Mutex>>, - /// set when background thread has completed - background_completed: Arc, -} - -impl Default for VerifyAccountsHashInBackground { - fn default() -> Self { - // initialize, expecting possible background verification to be started - Self { - // with default initialization, 'verified' is false - verified: Arc::new(AtomicBool::new(false)), - // no thread to start with - thread: Mutex::new(None::>), - background_completed: Arc::new(AtomicBool::new(false)), - } - } -} - -impl VerifyAccountsHashInBackground { - /// start the bg thread to do the verification - pub fn start(&self, start: impl FnOnce() -> JoinHandle) { - // note that we're not verified before - self.verified.store(false, Ordering::Release); - *self.thread.lock().unwrap() = Some(start()); - } - - /// notify that the bg process has completed - pub fn background_finished(&self) { - self.background_completed.store(true, Ordering::Release); - } - - /// notify that verification was completed successfully - /// This can occur because it completed in the background - /// or if the verification was run in the foreground. - pub fn verification_complete(&self) { - self.verified.store(true, Ordering::Release); - } - - /// join background thread. `panic` if verification failed. Otherwise, mark verification complete. 
- pub fn join_background_thread(&self) { - // just now completing - let mut lock = self.thread.lock().unwrap(); - if lock.is_none() { - return; // nothing to do - } - let result = lock.take().unwrap().join().unwrap(); - if !result { - panic!("initial background accounts hash verification failed: {result}"); - } - // we never have to check again - self.verification_complete(); - } - - /// return true if bg hash verification is complete - /// return false if bg hash verification has not completed yet - /// if hash verification failed, a panic will occur - pub fn check_complete(&self) -> bool { - if self.verified.load(Ordering::Acquire) { - // already completed - return true; - } - if !self.background_completed.load(Ordering::Acquire) { - false - } else { - // background thread has completed, so join the thread and panic if verify fails. - self.join_background_thread(); - true - } - } -} - -#[cfg(test)] -pub mod tests { - use {super::*, std::thread::Builder}; - - #[test] - fn test_default() { - let def = VerifyAccountsHashInBackground::default(); - assert!(!def.check_complete()); - assert!(!def.verified.load(Ordering::Acquire)); - assert!(def.thread.lock().unwrap().is_none()); - def.verification_complete(); - assert!(def.check_complete()); - } - - fn start_thread_and_return( - verify: &Arc, - result: bool, - action: impl FnOnce() + Send + 'static, - ) { - assert!(!verify.check_complete()); - let verify_ = Arc::clone(verify); - verify.start(|| { - Builder::new() - .name("solBgHashVerfy".to_string()) - .spawn(move || { - // should have been marked not complete before thread started - assert!(!verify_.check_complete()); - action(); - verify_.background_finished(); - result - }) - .unwrap() - }); - } - - #[test] - fn test_real() { - solana_logger::setup(); - let verify = Arc::new(VerifyAccountsHashInBackground::default()); - start_thread_and_return(&verify, true, || {}); - verify.join_background_thread(); - assert!(verify.check_complete()); - } - - #[test] - 
#[should_panic(expected = "initial background accounts hash verification failed")] - fn test_panic() { - let verify = Arc::new(VerifyAccountsHashInBackground::default()); - start_thread_and_return(&verify, false, || {}); - verify.join_background_thread(); - assert!(!verify.check_complete()); - } - - #[test] - fn test_long_running() { - solana_logger::setup(); - let verify = Arc::new(VerifyAccountsHashInBackground::default()); - let finish = Arc::new(AtomicBool::default()); - let finish_ = finish.clone(); - start_thread_and_return(&verify, true, move || { - // busy wait until atomic is set - while !finish_.load(Ordering::Relaxed) {} - }); - assert!(!verify.check_complete()); - finish.store(true, Ordering::Relaxed); - verify.join_background_thread(); - assert!(verify.check_complete()); - } -} diff --git a/accounts-db/store-tool/src/main.rs b/accounts-db/store-tool/src/main.rs index 231be8909a092c..6bc8ba23be04b0 100644 --- a/accounts-db/store-tool/src/main.rs +++ b/accounts-db/store-tool/src/main.rs @@ -137,28 +137,31 @@ fn do_inspect(file: impl AsRef, verbose: bool) -> Result<(), String> { let mut num_accounts = Saturating(0usize); let mut stored_accounts_size = Saturating(0); let mut lamports = Saturating(0); - storage.scan_accounts_stored_meta(|account| { - if verbose { - println!("{account:?}"); - } else { - println!( - "{:#0offset_width$x}: {:44}, owner: {:44}, data size: {:data_size_width$}, lamports: {}", - account.offset(), - account.pubkey().to_string(), - account.owner().to_string(), - account.data_len(), - account.lamports(), - ); - } - num_accounts += 1; - stored_accounts_size += account.stored_size(); - lamports += account.lamports(); - }).map_err(|err| { - format!( - "failed to scan accounts in file '{}': {err}", - file.as_ref().display(), - ) - })?; + storage + .scan_accounts_stored_meta(|account| { + if verbose { + println!("{account:?}"); + } else { + println!( + "{:#0offset_width$x}: {:44}, owner: {:44}, data size: {:data_size_width$}, \ + lamports: 
{}", + account.offset(), + account.pubkey().to_string(), + account.owner().to_string(), + account.data_len(), + account.lamports(), + ); + } + num_accounts += 1; + stored_accounts_size += account.stored_size(); + lamports += account.lamports(); + }) + .map_err(|err| { + format!( + "failed to scan accounts in file '{}': {err}", + file.as_ref().display(), + ) + })?; println!( "number of accounts: {}, stored accounts size: {}, file size: {}, lamports: {}", @@ -198,19 +201,20 @@ fn do_search( let file_size = match fs::metadata(file) { Ok(metadata) => metadata.len() as usize, Err(err) => { - eprintln!( - "failed to get storage metadata '{}': {err}", - file.display(), - ); + eprintln!("failed to get storage metadata '{}': {err}", file.display(),); return; } }; - let Ok((storage, _size)) = AccountsFile::new_from_file(file, file_size, StorageAccess::default()).inspect_err(|err| { - eprintln!( - "failed to open account storage file '{}': {err}", - file.display(), + let Ok((storage, _size)) = + AccountsFile::new_from_file(file, file_size, StorageAccess::default()).inspect_err( + |err| { + eprintln!( + "failed to open account storage file '{}': {err}", + file.display(), + ) + }, ) - }) else { + else { return; }; // By default, when the storage is dropped, the backing file will be removed. 
@@ -218,24 +222,31 @@ fn do_search( let storage = ManuallyDrop::new(storage); let file_name = Path::new(file.file_name().expect("path is a file")); - storage.scan_accounts_stored_meta(|account| { - if addresses.contains(account.pubkey()) { - if verbose { - println!("storage: {}, {account:?}", file_name.display()); - } else { - println!( - "storage: {}, offset: {}, pubkey: {}, owner: {}, data size: {}, lamports: {}", - file_name.display(), - account.offset(), - account.pubkey(), - account.owner(), - account.data_len(), - account.lamports(), - ); + storage + .scan_accounts_stored_meta(|account| { + if addresses.contains(account.pubkey()) { + if verbose { + println!("storage: {}, {account:?}", file_name.display()); + } else { + println!( + "storage: {}, offset: {}, pubkey: {}, owner: {}, data size: {}, \ + lamports: {}", + file_name.display(), + account.offset(), + account.pubkey(), + account.owner(), + account.data_len(), + account.lamports(), + ); + } } - } - }).unwrap_or_else(|err| eprintln!("failed to scan accounts in file '{}': {err}", - file.display())); + }) + .unwrap_or_else(|err| { + eprintln!( + "failed to scan accounts in file '{}': {err}", + file.display() + ) + }); }); Ok(()) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index c91ccc837ee5f0..dc7d3418e83e91 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -36,6 +36,7 @@ use { solana_time_utils::timestamp, solana_transaction::Transaction, std::{ + num::NonZeroUsize, sync::{atomic::Ordering, Arc, RwLock}, thread::sleep, time::{Duration, Instant}, @@ -287,6 +288,13 @@ fn main() { .possible_values(BlockProductionMethod::cli_names()) .help(BlockProductionMethod::cli_message()), ) + .arg( + Arg::with_name("block_production_num_workers") + .long("block-production-num-workers") + .takes_value(true) + .value_name("NUMBER") + .help("Number of worker threads to use for block production"), + ) .arg( Arg::with_name("transaction_struct") .long("transaction-structure") @@ 
-295,12 +303,6 @@ fn main() { .possible_values(TransactionStructure::cli_names()) .help(TransactionStructure::cli_message()), ) - .arg( - Arg::new("num_banking_threads") - .long("num-banking-threads") - .takes_value(true) - .help("Number of threads to use in the banking stage"), - ) .arg( Arg::new("simulate_mint") .long("simulate-mint") @@ -319,12 +321,12 @@ fn main() { let block_production_method = matches .value_of_t::("block_production_method") .unwrap_or_default(); + let block_production_num_workers = matches + .value_of_t::("block_production_num_workers") + .unwrap_or_else(|_| BankingStage::default_num_workers()); let transaction_struct = matches .value_of_t::("transaction_struct") .unwrap_or_default(); - let num_banking_threads = matches - .value_of_t::("num_banking_threads") - .unwrap_or_else(|_| BankingStage::num_threads()); // a multiple of packet chunk duplicates to avoid races let num_chunks = matches.value_of_t::("num_chunks").unwrap_or(16); let packets_per_batch = matches @@ -333,7 +335,7 @@ fn main() { let iterations = matches.value_of_t::("iterations").unwrap_or(1000); let batches_per_iteration = matches .value_of_t::("batches_per_iteration") - .unwrap_or(BankingStage::num_threads() as usize); + .unwrap_or(BankingStage::default_num_workers().get()); let write_lock_contention = matches .value_of_t::("write_lock_contention") .unwrap_or(WriteLockContention::None); @@ -375,10 +377,7 @@ fn main() { .iter() .map(|packets_for_single_iteration| packets_for_single_iteration.transactions.len() as u64) .sum(); - info!( - "threads: {} txs: {}", - num_banking_threads, total_num_transactions - ); + info!("worker threads: {block_production_num_workers} txs: {total_num_transactions}"); // fund all the accounts all_packets.iter().for_each(|packets_for_single_iteration| { @@ -431,13 +430,19 @@ fn main() { Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"), ); let leader_schedule_cache = 
Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, poh_recorder, transaction_recorder, poh_service, signal_receiver) = - create_test_recorder( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + poh_recorder, + mut poh_controller, + transaction_recorder, + poh_service, + signal_receiver, + ) = create_test_recorder( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); let (banking_tracer, tracer_thread) = BankingTracer::new(matches.is_present("trace_banking").then_some(( &blockstore.banking_trace_path(), @@ -457,17 +462,17 @@ fn main() { let banking_stage = BankingStage::new_num_threads( block_production_method, transaction_struct, - &poh_recorder, + poh_recorder.clone(), transaction_recorder, non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, - num_banking_threads, + block_production_num_workers, None, replay_vote_sender, None, bank_forks.clone(), - &prioritization_fee_cache, + prioritization_fee_cache, ); // This is so that the signal_receiver does not go out of scope after the closure. 
@@ -481,7 +486,7 @@ fn main() { let collector = solana_pubkey::new_rand(); let mut total_sent = 0; for current_iteration_index in 0..iterations { - trace!("RUNNING ITERATION {}", current_iteration_index); + trace!("RUNNING ITERATION {current_iteration_index}"); let now = Instant::now(); let mut sent = 0; @@ -530,10 +535,9 @@ fn main() { tx_total_us += now.elapsed().as_micros() as u64; let mut poh_time = Measure::start("poh_time"); - poh_recorder - .write() - .unwrap() - .reset(bank.clone(), Some((bank.slot(), bank.slot() + 1))); + poh_controller + .reset_sync(bank.clone(), Some((bank.slot(), bank.slot() + 1))) + .unwrap(); poh_time.stop(); let mut new_bank_time = Measure::start("new_bank"); @@ -548,7 +552,7 @@ fn main() { assert_matches!(poh_recorder.read().unwrap().bank(), None); update_bank_forks_and_poh_recorder_for_new_tpu_bank( &bank_forks, - &poh_recorder, + &mut poh_controller, new_bank, ); bank = bank_forks.read().unwrap().working_bank_with_scheduler(); @@ -590,10 +594,18 @@ fn main() { .unwrap() .working_bank() .transaction_count(); - debug!("processed: {} base: {}", txs_processed, base_tx_count); + debug!("processed: {txs_processed} base: {base_tx_count}"); - eprintln!("[total_sent: {}, base_tx_count: {}, txs_processed: {}, txs_landed: {}, total_us: {}, tx_total_us: {}]", - total_sent, base_tx_count, txs_processed, (txs_processed - base_tx_count), total_us, tx_total_us); + eprintln!( + "[total_sent: {}, base_tx_count: {}, txs_processed: {}, txs_landed: {}, total_us: {}, \ + tx_total_us: {}]", + total_sent, + base_tx_count, + txs_processed, + (txs_processed - base_tx_count), + total_us, + tx_total_us + ); eprintln!( "{{'name': 'banking_bench_total', 'median': '{:.2}'}}", diff --git a/banks-client/src/lib.rs b/banks-client/src/lib.rs index 6068666f18f9fa..90432b08544474 100644 --- a/banks-client/src/lib.rs +++ b/banks-client/src/lib.rs @@ -25,7 +25,7 @@ use { solana_pubkey::Pubkey, solana_rent::Rent, solana_signature::Signature, - solana_sysvar::Sysvar, + 
solana_sysvar::SysvarSerialize, solana_transaction::versioned::VersionedTransaction, tarpc::{ client::{self, NewClient, RequestDispatch}, @@ -182,7 +182,7 @@ impl BanksClient { } /// Return the cluster Sysvar - pub async fn get_sysvar(&self) -> Result { + pub async fn get_sysvar(&self) -> Result { let sysvar = self .get_account(T::id()) .await? diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index c273e77f381e81..8cadef88582d49 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -191,6 +191,11 @@ fn simulate_transaction( loaded_accounts_data_size, return_data, inner_instructions, + fee: _, + pre_balances: _, + post_balances: _, + pre_token_balances: _, + post_token_balances: _, } = bank.simulate_transaction_unchecked(&sanitized_transaction, true); let simulation_details = TransactionSimulationDetails { diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index 2a67bee53e835b..5b6beb76b27eb8 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -35,7 +35,6 @@ solana-commitment-config = { workspace = true } solana-compute-budget-interface = { workspace = true } solana-connection-cache = { workspace = true } solana-core = { workspace = true, features = ["dev-context-only-utils"] } -solana-faucet = { workspace = true } solana-fee-calculator = { workspace = true } solana-genesis = { workspace = true } solana-genesis-config = { workspace = true } @@ -43,7 +42,7 @@ solana-gossip = { workspace = true } solana-hash = { workspace = true } solana-instruction = { workspace = true } solana-keypair = { workspace = true } -solana-logger = "=2.3.1" +solana-logger = "=3.0.0" solana-measure = { workspace = true } solana-message = { workspace = true } solana-metrics = { workspace = true } @@ -67,7 +66,7 @@ solana-tpu-client = { workspace = true } solana-transaction = { workspace = true } solana-transaction-status = { workspace = true } solana-version = { workspace = true } 
-spl-instruction-padding-interface = { version = "=0.1.0" } +spl-instruction-padding-interface = { version = "=1.0.0" } thiserror = { workspace = true } [target.'cfg(not(any(target_env = "msvc", target_os = "freebsd")))'.dependencies] @@ -76,6 +75,7 @@ jemallocator = { workspace = true } [dev-dependencies] agave-feature-set = { workspace = true } serial_test = { workspace = true } +solana-faucet = { workspace = true, features = ["dev-context-only-utils"] } solana-local-cluster = { workspace = true } solana-rent = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 16f8a7dbf8fe52..c22180b29b757b 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -229,7 +229,8 @@ where let bsps = (tx_count) as f64 / ns as f64; let nsps = ns as f64 / (tx_count) as f64; info!( - "Done. {:.2} thousand signatures per second, {:.2} us per signature, {} ms total time, {:?}", + "Done. 
{:.2} thousand signatures per second, {:.2} us per signature, {} ms total \ + time, {:?}", bsps * 1_000_000_f64, nsps / 1_000_f64, duration.as_millis(), @@ -266,14 +267,11 @@ where T: 'static + TpsClient + Send + Sync + ?Sized, { if target_slots_per_epoch != 0 { - info!( - "Waiting until epochs are {} slots long..", - target_slots_per_epoch - ); + info!("Waiting until epochs are {target_slots_per_epoch} slots long.."); loop { if let Ok(epoch_info) = client.get_epoch_info() { if epoch_info.slots_in_epoch >= target_slots_per_epoch { - info!("Done epoch_info: {:?}", epoch_info); + info!("Done epoch_info: {epoch_info:?}"); break; } info!( @@ -295,7 +293,7 @@ fn create_sampler_thread( where T: 'static + TpsClient + Send + Sync + ?Sized, { - info!("Sampling TPS every {} second...", sample_period); + info!("Sampling TPS every {sample_period} second..."); let maxes = maxes.clone(); let client = client.clone(); Builder::new() @@ -440,12 +438,12 @@ where match client.get_transaction_count() { Ok(count) => break count, Err(err) => { - info!("Couldn't get transaction count: {:?}", err); + info!("Couldn't get transaction count: {err:?}"); sleep(Duration::from_secs(1)); } } }; - info!("Initial transaction count {}", first_tx_count); + info!("Initial transaction count {first_tx_count}"); let exit_signal = Arc::new(AtomicBool::new(false)); @@ -516,28 +514,28 @@ where info!("Waiting for sampler threads..."); if let Err(err) = sample_thread.join() { - info!(" join() failed with: {:?}", err); + info!(" join() failed with: {err:?}"); } // join the tx send threads info!("Waiting for transmit threads..."); for t in sender_threads { if let Err(err) = t.join() { - info!(" join() failed with: {:?}", err); + info!(" join() failed with: {err:?}"); } } if let Some(blockhash_thread) = blockhash_thread { info!("Waiting for blockhash thread..."); if let Err(err) = blockhash_thread.join() { - info!(" join() failed with: {:?}", err); + info!(" join() failed with: {err:?}"); } } if let 
Some(log_transaction_service) = log_transaction_service { info!("Waiting for log_transaction_service thread..."); if let Err(err) = log_transaction_service.join() { - info!(" join() failed with: {:?}", err); + info!(" join() failed with: {err:?}"); } } @@ -560,7 +558,7 @@ where } fn metrics_submit_lamport_balance(lamport_balance: u64) { - info!("Token balance: {}", lamport_balance); + info!("Token balance: {lamport_balance}"); datapoint_info!( "bench-tps-lamport_balance", ("balance", lamport_balance, i64) @@ -700,7 +698,7 @@ fn get_nonce_accounts( return nonce_accounts; } Err(err) => { - info!("Couldn't get durable nonce account: {:?}", err); + info!("Couldn't get durable nonce account: {err:?}"); sleep(Duration::from_secs(1)); } } @@ -883,7 +881,7 @@ fn get_new_latest_blockhash( return Some(new_blockhash); } } - debug!("Got same blockhash ({:?}), will retry...", blockhash); + debug!("Got same blockhash ({blockhash:?}), will retry..."); // Retry ~twice during a slot sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2)); @@ -962,7 +960,7 @@ fn do_tx_transfers( if let Some(txs) = txs { shared_tx_thread_count.fetch_add(1, Ordering::Relaxed); let num_txs = txs.len(); - info!("Transferring 1 unit {} times...", num_txs); + info!("Transferring 1 unit {num_txs} times..."); let transfer_start = Instant::now(); let mut old_transactions = false; let mut min_timestamp = u64::MAX; @@ -1000,13 +998,16 @@ fn do_tx_transfers( sent_at: Utc::now(), compute_unit_prices, }) { - error!("Receiver has been dropped with error `{error}`, stop sending transactions."); + error!( + "Receiver has been dropped with error `{error}`, stop sending \ + transactions." 
+ ); break 'thread_loop; } } if let Err(error) = client.send_batch(transactions) { - warn!("send_batch_sync in do_tx_transfers failed: {}", error); + warn!("send_batch_sync in do_tx_transfers failed: {error}"); } datapoint_info!( @@ -1084,10 +1085,7 @@ fn compute_and_report_stats( if total_maxes > 0.0 { let num_nodes_with_tps = maxes.read().unwrap().len() - nodes_with_zero_tps; let average_max = total_maxes / num_nodes_with_tps as f32; - info!( - "\nAverage max TPS: {:.2}, {} nodes had 0 TPS", - average_max, nodes_with_zero_tps - ); + info!("\nAverage max TPS: {average_max:.2}, {nodes_with_zero_tps} nodes had 0 TPS"); } let total_tx_send_count = total_tx_send_count as u64; @@ -1097,7 +1095,8 @@ fn compute_and_report_stats( 0.0 }; info!( - "\nHighest TPS: {:.2} sampling period {}s max transactions: {} clients: {} drop rate: {:.2}", + "\nHighest TPS: {:.2} sampling period {}s max transactions: {} clients: {} drop rate: \ + {:.2}", max_of_maxes, sample_period, max_tx_count, @@ -1121,7 +1120,7 @@ pub fn generate_and_fund_keypairs let rent = client.get_minimum_balance_for_rent_exemption(0)?; let lamports_per_account = lamports_per_account + rent; - info!("Creating {} keypairs...", keypair_count); + info!("Creating {keypair_count} keypairs..."); let (mut keypairs, extra) = generate_keypairs(funding_key, keypair_count as u64); fund_keypairs( client, @@ -1181,8 +1180,8 @@ pub fn fund_keypairs( let funding_key_balance = client.get_balance(&funding_key.pubkey()).unwrap_or(0); info!( - "Funding keypair balance: {} max_fee: {} lamports_per_account: {} extra: {} total: {}", - funding_key_balance, max_fee, lamports_per_account, extra, total + "Funding keypair balance: {funding_key_balance} max_fee: {max_fee} \ + lamports_per_account: {lamports_per_account} extra: {extra} total: {total}" ); if funding_key_balance < total + rent { @@ -1227,7 +1226,7 @@ mod tests { solana_commitment_config::CommitmentConfig, solana_fee_calculator::FeeRateGovernor, 
solana_genesis_config::{create_genesis_config, GenesisConfig}, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_nonce::state::State, solana_runtime::{bank::Bank, bank_client::BankClient, bank_forks::BankForks}, }; @@ -1242,7 +1241,7 @@ mod tests { #[test] fn test_bench_tps_bank_client() { - let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); + let (genesis_config, id) = create_genesis_config(10_000 * LAMPORTS_PER_SOL); let (bank, _bank_forks) = bank_with_all_features(&genesis_config); let client = Arc::new(BankClient::new_shared(bank)); @@ -1263,7 +1262,7 @@ mod tests { #[test] fn test_bench_tps_fund_keys() { - let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); + let (genesis_config, id) = create_genesis_config(10_000 * LAMPORTS_PER_SOL); let (bank, _bank_forks) = bank_with_all_features(&genesis_config); let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 20; @@ -1286,7 +1285,7 @@ mod tests { #[test] fn test_bench_tps_fund_keys_with_fees() { - let (mut genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); + let (mut genesis_config, id) = create_genesis_config(10_000 * LAMPORTS_PER_SOL); let fee_rate_governor = FeeRateGovernor::new(11, 0); genesis_config.fee_rate_governor = fee_rate_governor; let (bank, _bank_forks) = bank_with_all_features(&genesis_config); @@ -1306,7 +1305,7 @@ mod tests { #[test] fn test_bench_tps_create_durable_nonce() { - let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); + let (genesis_config, id) = create_genesis_config(10_000 * LAMPORTS_PER_SOL); let (bank, _bank_forks) = bank_with_all_features(&genesis_config); let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 10; diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index 4f4044ef6cde48..5556388455bf7d 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -121,7 +121,8 @@ impl Default for Config { /// 
Defines and builds the CLI args for a run of the benchmark pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { - App::new(crate_name!()).about(crate_description!()) + App::new(crate_name!()) + .about(crate_description!()) .version(version) .arg({ let arg = Arg::with_name("config_file") @@ -146,8 +147,8 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .global(true) .validator(is_url_or_moniker) .help( - "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]", + "URL for Solana's JSON RPC or moniker (or their first letter): [mainnet-beta, \ + testnet, devnet, localhost]", ), ) .arg( @@ -176,7 +177,6 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .takes_value(true) .hidden(hidden_unless_forced()) .help("Deprecated. Use --authority instead"), - ) .arg( Arg::with_name("authority") @@ -184,7 +184,9 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .long("authority") .value_name("PATH") .takes_value(true) - .help("File containing a client authority (keypair) to fund participating accounts"), + .help( + "File containing a client authority (keypair) to fund participating accounts", + ), ) .arg( Arg::with_name("threads") @@ -201,25 +203,24 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .takes_value(true) .help("Seconds to run benchmark, then exit; default is forever"), ) - .arg( - Arg::with_name("sustained") - .long("sustained") - .help("Use sustained performance mode vs. peak mode. This overlaps the tx generation with transfers."), - ) + .arg(Arg::with_name("sustained").long("sustained").help( + "Use sustained performance mode vs. peak mode. 
This overlaps the tx generation with \ + transfers.", + )) .arg( Arg::with_name("tx_count") .long("tx-count") .alias("tx_count") .value_name("NUM") .takes_value(true) - .help("Number of transactions to send per batch") + .help("Number of transactions to send per batch"), ) .arg( Arg::with_name("keypair_multiplier") .long("keypair-multiplier") .value_name("NUM") .takes_value(true) - .help("Multiply by transaction count to determine number of keypairs to create") + .help("Multiply by transaction count to determine number of keypairs to create"), ) .arg( Arg::with_name("thread-batch-sleep-ms") @@ -249,8 +250,8 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .value_name("LAMPORTS") .takes_value(true) .help( - "The cost in lamports that the cluster will charge for signature \ - verification when the cluster is operating at target-signatures-per-slot", + "The cost in lamports that the cluster will charge for signature verification \ + when the cluster is operating at target-signatures-per-slot", ), ) .arg( @@ -258,51 +259,53 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .long("num-lamports-per-account") .value_name("LAMPORTS") .takes_value(true) - .help( - "Number of lamports per account.", - ), + .help("Number of lamports per account."), ) .arg( Arg::with_name("target_slots_per_epoch") .long("target-slots-per-epoch") .value_name("SLOTS") .takes_value(true) - .help( - "Wait until epochs are this many slots long.", - ), + .help("Wait until epochs are this many slots long."), ) .arg( Arg::with_name("rpc_client") .long("use-rpc-client") .conflicts_with("tpu_client") .takes_value(false) - .help("Submit transactions with a RpcClient") + .help("Submit transactions with a RpcClient"), ) .arg( Arg::with_name("tpu_client") .long("use-tpu-client") .conflicts_with("rpc_client") .takes_value(false) - .help("Submit transactions with a TpuClient") + .help("Submit transactions with a TpuClient"), ) .arg( Arg::with_name("tpu_disable_quic") 
.long("tpu-disable-quic") .takes_value(false) - .help("DEPRECATED: Do not submit transactions via QUIC; only affects TpuClient (default) sends"), + .help( + "DEPRECATED: Do not submit transactions via QUIC; only affects TpuClient \ + (default) sends", + ), ) .arg( Arg::with_name("tpu_connection_pool_size") .long("tpu-connection-pool-size") .takes_value(true) - .help("Controls the connection pool size per remote address; only affects TpuClient (default) sends"), + .help( + "Controls the connection pool size per remote address; only affects TpuClient \ + (default) sends", + ), ) .arg( Arg::with_name("compute_unit_price") - .long("compute-unit-price") - .takes_value(true) - .validator(|s| is_within_range(s, 0..)) - .help("Sets constant compute-unit-price to transfer transactions"), + .long("compute-unit-price") + .takes_value(true) + .validator(|s| is_within_range(s, 0..)) + .help("Sets constant compute-unit-price to transfer transactions"), ) .arg( Arg::with_name("use_randomized_compute_unit_price") @@ -329,20 +332,29 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .requires("instruction_padding_data_size") .takes_value(true) .value_name("PUBKEY") - .help("If instruction data is padded, optionally specify the padding program id to target"), + .help( + "If instruction data is padded, optionally specify the padding program id to \ + target", + ), ) .arg( Arg::with_name("instruction_padding_data_size") .long("instruction-padding-data-size") .takes_value(true) - .help("If set, wraps all instructions in the instruction padding program, with the given amount of padding bytes in instruction data."), + .help( + "If set, wraps all instructions in the instruction padding program, with the \ + given amount of padding bytes in instruction data.", + ), ) .arg( Arg::with_name("num_conflict_groups") .long("num-conflict-groups") .takes_value(true) .validator(|arg| is_within_range(arg, 1..)) - .help("The number of unique destination accounts per transactions 'chunk'. 
Lower values will result in more transaction conflicts.") + .help( + "The number of unique destination accounts per transactions 'chunk'. Lower \ + values will result in more transaction conflicts.", + ), ) .arg( Arg::with_name("bind_address") @@ -360,7 +372,10 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .takes_value(true) .requires("json_rpc_url") .validator(is_keypair) - .help("File containing the node identity (keypair) of a validator with active stake. This allows communicating with network using staked connection"), + .help( + "File containing the node identity (keypair) of a validator with active \ + stake. This allows communicating with network using staked connection", + ), ) .arg( Arg::with_name("commitment_config") @@ -383,8 +398,8 @@ pub fn build_args<'a>(version: &'_ str) -> App<'a, '_> { .value_name("FILENAME") .takes_value(true) .help( - "File to save details about all the submitted transactions.\ - This option is useful for debug purposes." + "File to save details about all the submitted transactions.This option is \ + useful for debug purposes.", ), ) } diff --git a/bench-tps/src/keypairs.rs b/bench-tps/src/keypairs.rs index 12bb6fbc5df5a2..b278bc0e6198a8 100644 --- a/bench-tps/src/keypairs.rs +++ b/bench-tps/src/keypairs.rs @@ -25,7 +25,7 @@ where let path = Path::new(client_ids_and_stake_file); let file = File::open(path).unwrap(); - info!("Reading {}", client_ids_and_stake_file); + info!("Reading {client_ids_and_stake_file}"); let accounts: HashMap = serde_yaml::from_reader(file).unwrap(); let mut keypairs = vec![]; let mut last_balance = 0; @@ -34,7 +34,7 @@ where .into_iter() .for_each(|(keypair, primordial_account)| { let bytes: Vec = serde_json::from_str(keypair.as_str()).unwrap(); - keypairs.push(Keypair::from_bytes(&bytes).unwrap()); + keypairs.push(Keypair::try_from(bytes.as_ref()).unwrap()); last_balance = primordial_account.balance; }); diff --git a/bench-tps/src/log_transaction_service.rs 
b/bench-tps/src/log_transaction_service.rs index 5e54f4d80068bf..b3a021aed26207 100644 --- a/bench-tps/src/log_transaction_service.rs +++ b/bench-tps/src/log_transaction_service.rs @@ -89,7 +89,10 @@ impl LogTransactionService { Client: 'static + TpsClient + Send + Sync + ?Sized, { if !data_file_provided(block_data_file, transaction_data_file) { - panic!("Expect block-data-file or transaction-data-file is specified, must have been verified by callee."); + panic!( + "Expect block-data-file or transaction-data-file is specified, must have been \ + verified by callee." + ); } let client = client.clone(); @@ -123,8 +126,10 @@ impl LogTransactionService { }; let block_processing_timer_receiver = tick(Duration::from_millis(PROCESS_BLOCKS_EVERY_MS)); - let mut start_slot = get_slot_with_retry(&client, commitment) - .expect("get_slot_with_retry should have succeed, cannot proceed without having slot. Must be a problem with RPC."); + let mut start_slot = get_slot_with_retry(&client, commitment).expect( + "get_slot_with_retry should have succeeded, cannot proceed without having slot. 
Must be \ + a problem with RPC.", + ); let mut sender_stopped = false; let mut signature_to_tx_info = MapSignatureToTxInfo::new(); diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index ae79e3458c7ae4..bc99e03fd149e5 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -47,7 +47,7 @@ fn find_node_activated_stake( ) -> Result<(u64, u64), ()> { let vote_accounts = rpc_client.get_vote_accounts(); if let Err(error) = vote_accounts { - error!("Failed to get vote accounts, error: {}", error); + error!("Failed to get vote accounts, error: {error}"); return Err(()); } @@ -207,7 +207,7 @@ fn main() { let keypair_count = *tx_count * keypair_multiplier; if *write_to_client_file { - info!("Generating {} keypairs", keypair_count); + info!("Generating {keypair_count} keypairs"); let (keypairs, _) = generate_keypairs(id, keypair_count as u64); let num_accounts = keypairs.len() as u64; let max_fee = FeeRateGovernor::new(*target_lamports_per_signature, 0) @@ -228,7 +228,7 @@ fn main() { ); }); - info!("Writing {}", client_ids_and_stake_file); + info!("Writing {client_ids_and_stake_file}"); let serialized = serde_yaml::to_string(&accounts).unwrap(); let path = Path::new(&client_ids_and_stake_file); let mut file = File::create(path).unwrap(); @@ -259,7 +259,12 @@ fn main() { ); client .get_account(&instruction_padding_config.program_id) - .expect("Instruction padding program must be deployed to this cluster. Deploy the program using `solana program deploy ./bench-tps/tests/fixtures/spl_instruction_padding.so` and pass the resulting program id with `--instruction-padding-program-id`"); + .expect( + "Instruction padding program must be deployed to this cluster. 
Deploy the program \ + using `solana program deploy \ + ./bench-tps/tests/fixtures/spl_instruction_padding.so` and pass the resulting \ + program id with `--instruction-padding-program-id`", + ); } let keypairs = get_keypairs( client.clone(), diff --git a/bench-tps/src/perf_utils.rs b/bench-tps/src/perf_utils.rs index 87ccbae997a36b..907d937c029423 100644 --- a/bench-tps/src/perf_utils.rs +++ b/bench-tps/src/perf_utils.rs @@ -45,7 +45,7 @@ pub fn sample_txs( let mut txs = match client.get_transaction_count_with_commitment(CommitmentConfig::processed()) { Err(e) => { - info!("Couldn't get transaction count {:?}", e); + info!("Couldn't get transaction count {e:?}"); sleep(Duration::from_secs(sample_period)); continue; } @@ -53,7 +53,7 @@ pub fn sample_txs( }; if txs < last_txs { - info!("Expected txs({}) >= last_txs({})", txs, last_txs); + info!("Expected txs({txs}) >= last_txs({last_txs})"); txs = last_txs; } total_txs = txs - initial_txs; diff --git a/bench-tps/src/send_batch.rs b/bench-tps/src/send_batch.rs index b40f43c6e02484..9c8092e8007df9 100644 --- a/bench-tps/src/send_batch.rs +++ b/bench-tps/src/send_batch.rs @@ -31,7 +31,7 @@ pub fn get_latest_blockhash(client: &T) -> Hash { match client.get_latest_blockhash() { Ok(blockhash) => return blockhash, Err(err) => { - info!("Couldn't get last blockhash: {:?}", err); + info!("Couldn't get last blockhash: {err:?}"); sleep(Duration::from_secs(1)); } }; @@ -110,7 +110,7 @@ pub fn generate_durable_nonce_accounts = authority_keypairs .iter() .zip(nonce_keypairs.iter()) @@ -155,7 +155,7 @@ fn verify_funding_transfer( for a in &tx.message().account_keys[1..] 
{ match client.get_balance_with_commitment(a, CommitmentConfig::processed()) { Ok(balance) => return balance >= amount, - Err(err) => error!("failed to get balance {:?}", err), + Err(err) => error!("failed to get balance {err:?}"), } } false @@ -231,7 +231,7 @@ where // retry tries += 1; } - info!("transactions sent in {} tries", tries); + info!("transactions sent in {tries} tries"); } fn sign(&mut self, blockhash: Hash) { @@ -295,16 +295,16 @@ where if failed_verify > 100 && failed_verify > verified_txs { too_many_failures.store(true, Ordering::Relaxed); warn!( - "Too many failed transfers... {} remaining, {} verified, {} failures", - remaining_count, verified_txs, failed_verify + "Too many failed transfers... {remaining_count} remaining, \ + {verified_txs} verified, {failed_verify} failures" ); } if remaining_count > 0 { let mut time_l = time.lock().unwrap(); if time_l.elapsed().as_secs() > 2 { info!( - "Verifying transfers... {} remaining, {} verified, {} failures", - remaining_count, verified_txs, failed_verify + "Verifying transfers... {remaining_count} remaining, \ + {verified_txs} verified, {failed_verify} failures" ); *time_l = Instant::now(); } @@ -324,8 +324,8 @@ where let failed_verify = failed_verify.load(Ordering::Relaxed); let remaining_count = starting_txs.saturating_sub(verified_txs + failed_verify); info!( - "Verifying transfers... {} remaining, {} verified, {} failures", - remaining_count, verified_txs, failed_verify + "Verifying transfers... 
{remaining_count} remaining, {verified_txs} verified, \ + {failed_verify} failures" ); sleep(Duration::from_millis(100)); } diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index bb477686d8eb41..a47ea4f5c0f2d8 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -11,7 +11,7 @@ use { solana_commitment_config::CommitmentConfig, solana_connection_cache::connection_cache::NewConnectionConfig, solana_core::validator::ValidatorConfig, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_for_tests, solana_fee_calculator::FeeRateGovernor, solana_keypair::Keypair, solana_local_cluster::{ @@ -41,7 +41,6 @@ fn program_account(program_data: &[u8]) -> AccountSharedData { } fn test_bench_tps_local_cluster(config: Config) { - let native_instruction_processors = vec![]; let additional_accounts = vec![( spl_instruction_padding_interface::ID, program_account(include_bytes!("fixtures/spl_instruction_padding.so")), @@ -51,7 +50,11 @@ fn test_bench_tps_local_cluster(config: Config) { let faucet_keypair = Keypair::new(); let faucet_pubkey = faucet_keypair.pubkey(); - let faucet_addr = run_local_faucet(faucet_keypair, None); + let faucet_addr = run_local_faucet_for_tests( + faucet_keypair, + None, /* per_time_cap */ + 0, /* port */ + ); const NUM_NODES: usize = 1; let cluster = LocalCluster::new( @@ -68,7 +71,6 @@ fn test_bench_tps_local_cluster(config: Config) { }, NUM_NODES, ), - native_instruction_processors, additional_accounts, ..ClusterConfig::default() }, @@ -109,8 +111,11 @@ fn test_bench_tps_test_validator(config: Config) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_for_tests( + mint_keypair, + None, /* per_time_cap */ + 0, /* port */ + ); let test_validator = TestValidatorGenesis::default() .fee_rate_governor(FeeRateGovernor::new(0, 0)) diff --git 
a/bench-vote/Cargo.toml b/bench-vote/Cargo.toml index f6c73ae97a94b4..b0460558f6ba1f 100644 --- a/bench-vote/Cargo.toml +++ b/bench-vote/Cargo.toml @@ -29,6 +29,7 @@ solana-streamer = { workspace = true } solana-transaction = { workspace = true } solana-version = { workspace = true } solana-vote-program = { workspace = true } +tokio-util = { workspace = true } [target.'cfg(not(any(target_env = "msvc", target_os = "freebsd")))'.dependencies] jemallocator = { workspace = true } diff --git a/bench-vote/src/main.rs b/bench-vote/src/main.rs index 9923edc1393875..488c75fe22230d 100644 --- a/bench-vote/src/main.rs +++ b/bench-vote/src/main.rs @@ -18,7 +18,7 @@ use { solana_streamer::{ packet::PacketBatchRecycler, quic::{ - spawn_server_multi, QuicServerParams, DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER, + spawn_server_with_cancel, QuicServerParams, DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER, DEFAULT_MAX_STAKED_CONNECTIONS, }, streamer::{receiver, PacketBatchReceiver, StakedNodes, StreamerReceiveStats}, @@ -36,6 +36,7 @@ use { thread::{self, spawn, JoinHandle, Result}, time::{Duration, Instant, SystemTime}, }, + tokio_util::sync::CancellationToken, }; #[cfg(not(any(target_env = "msvc", target_os = "freebsd")))] @@ -55,6 +56,7 @@ fn sink( ) -> JoinHandle<()> { spawn(move || { let mut last_report = Instant::now(); + let mut last_count = 0; while !exit.load(Ordering::Relaxed) { if let Ok(packet_batch) = receiver.recv_timeout(SINK_RECEIVE_TIMEOUT) { received_size.fetch_add(packet_batch.len(), Ordering::Relaxed); @@ -63,8 +65,11 @@ fn sink( let count = received_size.load(Ordering::Relaxed); if verbose && last_report.elapsed() > SINK_REPORT_INTERVAL { - println!("Received txns count: {count}"); + let change = count - last_count; + let rate = change as u64 / SINK_REPORT_INTERVAL.as_secs(); + println!("Received txns count: total: {count}, rate {rate}/s"); last_report = Instant::now(); + last_count = count; } } }) @@ -83,7 +88,10 @@ fn main() -> Result<()> { .value_name("KEYPAIR") 
.takes_value(true) .validator(is_keypair_or_ask_keyword) - .help("Identity keypair for the QUIC endpoint. If it is not specified a random key is created."), + .help( + "Identity keypair for the QUIC endpoint. If it is not specified a random key \ + is created.", + ), ) .arg( Arg::with_name("num-recv-sockets") @@ -118,7 +126,9 @@ fn main() -> Result<()> { .long("max-connections-per-ipaddr-per-min") .value_name("NUM") .takes_value(true) - .help("Maximum client connections per ipaddr per minute allowed on the server side."), + .help( + "Maximum client connections per ipaddr per minute allowed on the server side.", + ), ) .arg( Arg::with_name("connection-pool-size") @@ -147,7 +157,10 @@ fn main() -> Result<()> { .value_name("HOST:PORT") .takes_value(true) .validator(|arg| solana_net_utils::is_host_port(arg.to_string())) - .help("The destination streamer address to which the client will send transactions to"), + .help( + "The destination streamer address to which the client will send transactions \ + to", + ), ) .arg( Arg::with_name("use-connection-cache") @@ -232,8 +245,9 @@ fn main() -> Result<()> { } }); - let (exit, read_threads, sink_threads, destination) = if !client_only { + let (exit, cancel, read_threads, sink_threads, destination) = if !client_only { let exit = Arc::new(AtomicBool::new(false)); + let cancel = CancellationToken::new(); let mut read_channels = Vec::new(); let mut read_threads = Vec::new(); @@ -261,15 +275,15 @@ fn main() -> Result<()> { let (s_reader, r_reader) = unbounded(); read_channels.push(r_reader); - let server = spawn_server_multi( + let server = spawn_server_with_cancel( "solRcvrBenVote", "bench_vote_metrics", read_sockets, &quic_params.identity_keypair, s_reader, - exit.clone(), quic_params.staked_nodes.clone(), quic_server_params, + cancel.clone(), ) .unwrap(); read_threads.push(server.thread); @@ -304,12 +318,13 @@ fn main() -> Result<()> { println!("Running server at {destination:?}"); ( Some(exit), + Some(cancel), 
Some(read_threads), Some(sink_threads), destination, ) } else { - (None, None, None, destination.unwrap()) + (None, None, None, None, destination.unwrap()) }; let start = SystemTime::now(); @@ -332,6 +347,7 @@ fn main() -> Result<()> { if !server_only { if let Some(exit) = exit { exit.store(true, Ordering::Relaxed); + cancel.unwrap().cancel(); } } else { println!("To stop the server, please press ^C"); diff --git a/bucket_map/src/bucket_api.rs b/bucket_map/src/bucket_api.rs index e6969e2cd3ea75..77d27d1f183ab2 100644 --- a/bucket_map/src/bucket_api.rs +++ b/bucket_map/src/bucket_api.rs @@ -68,11 +68,11 @@ impl BucketApi { } /// Get the values for Pubkey `key` - pub fn read_value(&self, key: &Pubkey) -> Option<(Vec, RefCount)> { + pub fn read_value From<&'a [T]>>(&self, key: &Pubkey) -> Option<(C, RefCount)> { self.bucket.read().unwrap().as_ref().and_then(|bucket| { bucket .read_value(key) - .map(|(value, ref_count)| (value.to_vec(), ref_count)) + .map(|(value, ref_count)| (C::from(value), ref_count)) }) } diff --git a/bucket_map/src/bucket_map.rs b/bucket_map/src/bucket_map.rs index e4f67f632d4fb4..099b98113ea307 100644 --- a/bucket_map/src/bucket_map.rs +++ b/bucket_map/src/bucket_map.rs @@ -154,7 +154,7 @@ impl BucketMap { } /// Get the values for Pubkey `key` - pub fn read_value(&self, key: &Pubkey) -> Option<(Vec, RefCount)> { + pub fn read_value From<&'a [T]>>(&self, key: &Pubkey) -> Option<(C, RefCount)> { self.get_bucket(key).read_value(key) } @@ -236,12 +236,12 @@ mod tests { } else { let result = index.try_insert(&key, (&[0], 0)); assert!(result.is_err()); - assert_eq!(index.read_value(&key), None); + assert_eq!(index.read_value::>(&key), None); if pass == 2 { // another call to try insert again - should still return an error let result = index.try_insert(&key, (&[0], 0)); assert!(result.is_err()); - assert_eq!(index.read_value(&key), None); + assert_eq!(index.read_value::>(&key), None); } bucket.grow(result.unwrap_err()); let result = 
index.try_insert(&key, (&[0], 0)); @@ -298,13 +298,13 @@ mod tests { let index = BucketMap::new(config); for i in 0..10 { let key = Pubkey::new_unique(); - assert_eq!(index.read_value(&key), None); + assert_eq!(index.read_value::>(&key), None); index.update(&key, |_| Some((vec![i], 1))); assert_eq!(index.read_value(&key), Some((vec![i], 1))); index.delete_key(&key); - assert_eq!(index.read_value(&key), None); + assert_eq!(index.read_value::>(&key), None); index.update(&key, |_| Some((vec![i], 1))); assert_eq!(index.read_value(&key), Some((vec![i], 1))); @@ -318,13 +318,13 @@ mod tests { let index = BucketMap::new(config); for i in 0..100 { let key = Pubkey::new_unique(); - assert_eq!(index.read_value(&key), None); + assert_eq!(index.read_value::>(&key), None); index.update(&key, |_| Some((vec![i], 1))); assert_eq!(index.read_value(&key), Some((vec![i], 1))); index.delete_key(&key); - assert_eq!(index.read_value(&key), None); + assert_eq!(index.read_value::>(&key), None); index.update(&key, |_| Some((vec![i], 1))); assert_eq!(index.read_value(&key), Some((vec![i], 1))); @@ -379,7 +379,7 @@ mod tests { for k in 0..keys.len() { let key = &keys[k]; index.delete_key(key); - assert_eq!(index.read_value(key), None); + assert_eq!(index.read_value::>(key), None); for key in keys.iter().skip(k + 1) { let i = read_be_u64(key.as_ref()); assert_eq!(index.read_value(key), Some((vec![i], 1))); diff --git a/builtins-default-costs/Cargo.toml b/builtins-default-costs/Cargo.toml index f7f0e85cc02c56..a9716406407c62 100644 --- a/builtins-default-costs/Cargo.toml +++ b/builtins-default-costs/Cargo.toml @@ -19,12 +19,13 @@ name = "solana_builtins_default_costs" [features] frozen-abi = ["dep:solana-frozen-abi", "solana-vote-program/frozen-abi"] -dev-context-only-utils = [] +dev-context-only-utils = ["dep:qualifier_attr"] [dependencies] agave-feature-set = { workspace = true } ahash = { workspace = true } log = { workspace = true } +qualifier_attr = { workspace = true, optional = true } 
solana-bpf-loader-program = { workspace = true } solana-compute-budget-program = { workspace = true } solana-frozen-abi = { workspace = true, optional = true, features = [ diff --git a/builtins-default-costs/src/lib.rs b/builtins-default-costs/src/lib.rs index 216b7a869bec5a..fc74bc0c1f4836 100644 --- a/builtins-default-costs/src/lib.rs +++ b/builtins-default-costs/src/lib.rs @@ -1,16 +1,22 @@ #![cfg_attr(feature = "frozen-abi", feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] + +#[cfg(feature = "dev-context-only-utils")] +use qualifier_attr::field_qualifiers; use { - agave_feature_set::{self as feature_set}, ahash::AHashMap, solana_pubkey::Pubkey, solana_sdk_ids::{ bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, compute_budget, ed25519_program, - loader_v4, secp256k1_program, stake, system_program, vote, + loader_v4, secp256k1_program, system_program, vote, }, }; #[derive(Clone)] +#[cfg_attr( + feature = "dev-context-only-utils", + field_qualifiers(core_bpf_migration_feature(pub), position(pub)) +)] pub struct MigratingBuiltinCost { core_bpf_migration_feature: Pubkey, // encoding positional information explicitly for migration feature item, @@ -77,20 +83,16 @@ static BUILTIN_INSTRUCTION_COSTS: std::sync::LazyLock=1.0.0 - # need to solve this depentant tree: + # need to solve this dependant tree: # jsonrpc-core-client v18.0.0 -> jsonrpc-client-transports v18.0.0 -> url v1.7.2 -> idna v0.1.5 --ignore RUSTSEC-2024-0421 diff --git a/ci/docker/env.sh b/ci/docker/env.sh index a54c3620e2c28e..fec93a008f6f2e 100755 --- a/ci/docker/env.sh +++ b/ci/docker/env.sh @@ -13,7 +13,7 @@ fi export CI_DOCKER_ARG_BASE_IMAGE=ubuntu:22.04 export CI_DOCKER_ARG_RUST_VERSION="${rust_stable}" export CI_DOCKER_ARG_RUST_NIGHTLY_VERSION="${rust_nightly}" -export CI_DOCKER_ARG_NODE_MAJOR=18 +export CI_DOCKER_ARG_NODE_MAJOR=24 export CI_DOCKER_ARG_SCCACHE_VERSION=v0.9.1 export CI_DOCKER_ARG_GRCOV_VERSION=v0.8.18 diff --git a/ci/publish-tarball.sh 
b/ci/publish-tarball.sh index 3ec0a25de61fdb..e15bca255c25c0 100755 --- a/ci/publish-tarball.sh +++ b/ci/publish-tarball.sh @@ -3,19 +3,6 @@ set -e cd "$(dirname "$0")/.." -if [[ -n $APPVEYOR ]]; then - # Bootstrap rust build environment - source ci/env.sh - source ci/rust-version.sh - - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe - export USERPROFILE="D:\\" - ./rustup-init -yv --default-toolchain "$rust_stable" --default-host x86_64-pc-windows-msvc - export PATH="$PATH:/d/.cargo/bin" - rustc -vV - cargo -vV -fi - DRYRUN= if [[ -z $CI_BRANCH ]]; then DRYRUN="echo" @@ -91,10 +78,16 @@ echo --- Creating release tarball source ci/rust-version.sh stable scripts/cargo-install-all.sh stable "${RELEASE_BASENAME}" - tar cvf "${TARBALL_BASENAME}"-$TARGET.tar "${RELEASE_BASENAME}" - bzip2 "${TARBALL_BASENAME}"-$TARGET.tar - cp "${RELEASE_BASENAME}"/bin/agave-install-init agave-install-init-$TARGET - cp "${RELEASE_BASENAME}"/version.yml "${TARBALL_BASENAME}"-$TARGET.yml + source scripts/agave-build-lists.sh + tmp_excludes=$(mktemp) + for bin in "${AGAVE_BINS_VAL_OP[@]}"; do + find "${RELEASE_BASENAME}" -type f -name "$bin" -print -quit >> "$tmp_excludes" + done + + tar -I bzip2 -X "$tmp_excludes" -cvf "${TARBALL_BASENAME}"-"$TARGET".tar.bz2 "${RELEASE_BASENAME}" + + cp "${RELEASE_BASENAME}"/bin/agave-install-init agave-install-init-"$TARGET" + cp "${RELEASE_BASENAME}"/version.yml "${TARBALL_BASENAME}"-"$TARGET".yml ) # Maybe tarballs are platform agnostic, only publish them from the Linux build @@ -135,9 +128,6 @@ for file in "${TARBALL_BASENAME}"-$TARGET.tar.bz2 "${TARBALL_BASENAME}"-$TARGET. 
mkdir -p github-action-release-upload/ cp -v "$file" github-action-release-upload/ fi - elif [[ -n $APPVEYOR ]]; then - # Add artifacts for .appveyor.yml to upload - appveyor PushArtifact "$file" -FileName "$CHANNEL_OR_TAG"/"$file" fi done diff --git a/ci/run-sanity.sh b/ci/run-sanity.sh index 8edf698b294460..0805e1b04f7ffd 100755 --- a/ci/run-sanity.sh +++ b/ci/run-sanity.sh @@ -6,7 +6,7 @@ cd "$(dirname "$0")/.." source multinode-demo/common.sh if [[ -z $CI ]]; then - # Build eargerly if needed for local development. Otherwise, odd timing error occurs... + # Build eagerly if needed for local development. Otherwise, odd timing error occurs... $solana_keygen --version $solana_genesis --version $solana_faucet --version diff --git a/ci/test-abi.sh b/ci/test-abi.sh new file mode 100755 index 00000000000000..c725d8fc156f93 --- /dev/null +++ b/ci/test-abi.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# +# Easily run the ABI tests for the entire repo or a subset +# + +here="$(dirname "$0")" +cargo="$(readlink -f "${here}/../cargo")" + +set -x +exec "$cargo" nightly test --features frozen-abi --lib -- test_abi_ --nocapture diff --git a/ci/test-bench.sh b/ci/test-bench.sh deleted file mode 100755 index c39d787323029b..00000000000000 --- a/ci/test-bench.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash -# To prevent usange of `./cargo` without `nightly` -# Introduce cargoNighlty and disable warning to use word splitting -# shellcheck disable=SC2086 -set -e -cd "$(dirname "$0")/.." 
- -source ci/_ -source ci/upload-ci-artifact.sh - -eval "$(ci/channel-info.sh)" - -cargoNightly="$(readlink -f "./cargo") nightly" - -set -o pipefail -export RUST_BACKTRACE=1 - -UPLOAD_METRICS="" -TARGET_BRANCH=$CI_BRANCH -if [[ -z $CI_BRANCH ]] || [[ -n $CI_PULL_REQUEST ]]; then - TARGET_BRANCH=$EDGE_CHANNEL -else - UPLOAD_METRICS="upload" -fi - -BENCH_FILE=bench_output.log -BENCH_ARTIFACT=current_bench_results.log - -# solana-keygen required when building C programs -_ cargo build --manifest-path=keygen/Cargo.toml -export PATH="$PWD/target/debug":$PATH - -# Clear the C dependency files, if dependency moves these files are not regenerated -test -d target/debug/sbf && find target/debug/sbf -name '*.d' -delete -test -d target/release/sbf && find target/release/sbf -name '*.d' -delete - -# Ensure all dependencies are built -_ $cargoNightly build --release - -# Remove "BENCH_FILE", if it exists so that the following commands can append -rm -f "$BENCH_FILE" - -# Run runtime benches -_ $cargoNightly bench --manifest-path runtime/Cargo.toml ${V:+--verbose} \ - -- -Z unstable-options --format=json | tee -a "$BENCH_FILE" - -# Run gossip benches -_ $cargoNightly bench --manifest-path gossip/Cargo.toml ${V:+--verbose} \ - -- -Z unstable-options --format=json | tee -a "$BENCH_FILE" - -# Run poh benches -_ $cargoNightly bench --manifest-path poh/Cargo.toml ${V:+--verbose} \ - -- -Z unstable-options --format=json | tee -a "$BENCH_FILE" - -# Run core benches -_ $cargoNightly bench --manifest-path core/Cargo.toml ${V:+--verbose} \ - -- -Z unstable-options --format=json | tee -a "$BENCH_FILE" - -# Run sbf benches -_ make -C programs/sbf all -_ $cargoNightly bench --manifest-path programs/sbf/Cargo.toml ${V:+--verbose} --features=sbf_c \ - -- -Z unstable-options --format=json --nocapture | tee -a "$BENCH_FILE" - -# Run banking/accounts bench. Doesn't require nightly, but use since it is already built. 
-_ $cargoNightly run --release --manifest-path banking-bench/Cargo.toml ${V:+--verbose} | tee -a "$BENCH_FILE" - -# `solana-upload-perf` disabled as it can take over 30 minutes to complete for some -# reason -exit 0 -_ $cargoNightly run --release --package solana-upload-perf \ - -- "$BENCH_FILE" "$TARGET_BRANCH" "$UPLOAD_METRICS" | tee "$BENCH_ARTIFACT" - -upload-ci-artifact "$BENCH_FILE" -upload-ci-artifact "$BENCH_ARTIFACT" diff --git a/ci/test-shuttle.sh b/ci/test-shuttle.sh index fbc607c0a48b93..c7a1dd821fd86d 100755 --- a/ci/test-shuttle.sh +++ b/ci/test-shuttle.sh @@ -4,4 +4,4 @@ set -eo pipefail source ci/_ -cargo nextest run --profile ci --manifest-path="svm/Cargo.toml" --features="shuttle-test" --test concurrent_tests --jobs 1 +cargo nextest run --profile ci --manifest-path="svm/Cargo.toml" --features="shuttle-test" --test concurrent_tests --release --jobs 1 diff --git a/ci/upload-ci-artifact.sh b/ci/upload-ci-artifact.sh index e7cc34ab2b2d8c..d8f1e1aa9db259 100644 --- a/ci/upload-ci-artifact.sh +++ b/ci/upload-ci-artifact.sh @@ -16,31 +16,6 @@ upload-ci-artifact() { fi } -upload-s3-artifact() { - echo "--- artifact: $1 to $2" - ( - args=( - --rm - --env AWS_ACCESS_KEY_ID - --env AWS_SECRET_ACCESS_KEY - --volume "$PWD:/solana" - - ) - if [[ $(uname -m) = arm64 ]]; then - # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr - args+=( - --platform linux/amd64 - ) - fi - args+=( - amazon/aws-cli:2.13.11 - s3 cp "$1" "$2" --acl public-read - ) - set -x - docker run "${args[@]}" - ) -} - upload-gcs-artifact() { echo "--- artifact: $1 to $2" docker run --rm \ diff --git a/clap-utils/src/compute_budget.rs b/clap-utils/src/compute_budget.rs index 24f64ec13b091f..0c5a7e3cd56f74 100644 --- a/clap-utils/src/compute_budget.rs +++ b/clap-utils/src/compute_budget.rs @@ -6,7 +6,8 @@ use { pub const COMPUTE_UNIT_PRICE_ARG: ArgConstant<'static> = ArgConstant { name: "compute_unit_price", long: "--with-compute-unit-price", - help: 
"Set compute unit price for transaction, in increments of 0.000001 lamports per compute unit.", + help: "Set compute unit price for transaction, in increments of 0.000001 lamports per compute \ + unit.", }; pub const COMPUTE_UNIT_LIMIT_ARG: ArgConstant<'static> = ArgConstant { @@ -43,4 +44,6 @@ pub enum ComputeUnitLimit { Static(u32), /// Simulate the transaction to find out the compute unit usage Simulated, + /// Simulate the transaction and add a small percentage to account for potential drift + SimulatedWithExtraPercentage(u8), } diff --git a/clap-utils/src/fee_payer.rs b/clap-utils/src/fee_payer.rs index ef21c31def8852..ea0cd363d1f2cd 100644 --- a/clap-utils/src/fee_payer.rs +++ b/clap-utils/src/fee_payer.rs @@ -6,9 +6,9 @@ use { pub const FEE_PAYER_ARG: ArgConstant<'static> = ArgConstant { name: "fee_payer", long: "fee-payer", - help: "Specify the fee-payer account. This may be a keypair file, the ASK keyword \n\ - or the pubkey of an offline signer, provided an appropriate --signer argument \n\ - is also passed. Defaults to the client keypair.", + help: "Specify the fee-payer account. This may be a keypair file, the ASK keyword or the \ + pubkey of an offline signer, provided an appropriate --signer argument is also passed. \ + Defaults to the client keypair.", }; pub fn fee_payer_arg<'a, 'b>() -> Arg<'a, 'b> { diff --git a/clap-utils/src/input_parsers.rs b/clap-utils/src/input_parsers.rs index 6ddd92b539ff6a..d720660d4bab30 100644 --- a/clap-utils/src/input_parsers.rs +++ b/clap-utils/src/input_parsers.rs @@ -198,7 +198,7 @@ pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option { let lamports = if lamports.is_empty() { 0 } else { - format!("{:0<9}", lamports)[..9].parse().ok()? + format!("{lamports:0<9}")[..9].parse().ok()? 
}; Some( LAMPORTS_PER_SOL @@ -386,9 +386,9 @@ mod tests { #[test] #[ignore = "historical reference; shows float behavior fixed in pull #4988"] fn test_lamports_of_sol_origin() { - use solana_native_token::sol_to_lamports; + use solana_native_token::sol_str_to_lamports; pub fn lamports_of_sol(matches: &ArgMatches<'_>, name: &str) -> Option { - value_of(matches, name).map(sol_to_lamports) + matches.value_of(name).and_then(sol_str_to_lamports) } let matches = app().get_matches_from(vec!["test", "--single", "50"]); diff --git a/clap-utils/src/keypair.rs b/clap-utils/src/keypair.rs index e44015d180a9ec..421d1a2df2ebe7 100644 --- a/clap-utils/src/keypair.rs +++ b/clap-utils/src/keypair.rs @@ -176,7 +176,8 @@ impl DefaultSigner { }) .map_err(|_| { std::io::Error::other(format!( - "No default signer found, run \"solana-keygen new -o {}\" to create a new one", + "No default signer found, run \"solana-keygen new -o {}\" to create a new \ + one", self.path )) })?; @@ -773,7 +774,8 @@ pub fn signer_from_path_with_config( } SignerSourceKind::Filepath(path) => match read_keypair_file(&path) { Err(e) => Err(std::io::Error::other(format!( - "could not read keypair file \"{path}\". Run \"solana-keygen new\" to create a keypair file: {e}" + "could not read keypair file \"{path}\". Run \"solana-keygen new\" to create a \ + keypair file: {e}" )) .into()), Ok(file) => Ok(Box::new(file)), @@ -807,9 +809,9 @@ pub fn signer_from_path_with_config( } else if config.allow_null_signer || matches.is_present(SIGN_ONLY_ARG.name) { Ok(Box::new(NullSigner::new(&pubkey))) } else { - Err(std::io::Error::other( - format!("missing signature for supplied pubkey: {pubkey}"), - ) + Err(std::io::Error::other(format!( + "missing signature for supplied pubkey: {pubkey}" + )) .into()) } } @@ -893,8 +895,8 @@ pub fn resolve_signer_from_path( } SignerSourceKind::Filepath(path) => match read_keypair_file(&path) { Err(e) => Err(std::io::Error::other(format!( - "could not read keypair file \"{path}\". 
\ - Run \"solana-keygen new\" to create a keypair file: {e}" + "could not read keypair file \"{path}\". Run \"solana-keygen new\" to create a \ + keypair file: {e}" )) .into()), Ok(_) => Ok(Some(path.to_string())), @@ -933,7 +935,8 @@ pub const ASK_KEYWORD: &str = "ASK"; pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant { long: "skip-seed-phrase-validation", name: "skip_seed_phrase_validation", - help: "Skip validation of seed phrases. Use this if your phrase does not use the BIP39 official English word list", + help: "Skip validation of seed phrases. Use this if your phrase does not use the BIP39 \ + official English word list", }; /// Prompts user for a passphrase and then asks for confirmirmation to check for mistakes @@ -1012,8 +1015,8 @@ pub fn keypair_from_path( } SignerSourceKind::Filepath(path) => match read_keypair_file(&path) { Err(e) => Err(std::io::Error::other(format!( - "could not read keypair file \"{path}\". \ - Run \"solana-keygen new\" to create a keypair file: {e}" + "could not read keypair file \"{path}\". Run \"solana-keygen new\" to create a \ + keypair file: {e}" )) .into()), Ok(file) => Ok(file), @@ -1043,7 +1046,8 @@ pub fn keypair_from_seed_phrase( let seed_phrase = prompt_password(format!("[{keypair_name}] seed phrase: "))?; let seed_phrase = seed_phrase.trim(); let passphrase_prompt = format!( - "[{keypair_name}] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue: ", + "[{keypair_name}] If this seed phrase has an associated passphrase, enter it now. \ + Otherwise, press ENTER to continue: ", ); let keypair = if skip_validation { diff --git a/clap-utils/src/nonce.rs b/clap-utils/src/nonce.rs index 514302e6f52e61..9c2e64c85672d1 100644 --- a/clap-utils/src/nonce.rs +++ b/clap-utils/src/nonce.rs @@ -9,7 +9,7 @@ pub const NONCE_ARG: ArgConstant<'static> = ArgConstant { help: "Provide the nonce account to use when creating a nonced \n\ transaction. 
Nonced transactions are useful when a transaction \n\ requires a lengthy signing process. Learn more about nonced \n\ - transactions at https://docs.solanalabs.com/cli/examples/durable-nonce", + transactions at https://docs.anza.xyz/cli/examples/durable-nonce", }; pub const NONCE_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant { diff --git a/clap-v3-utils/src/compute_budget.rs b/clap-v3-utils/src/compute_budget.rs index b2f43f6851a8ea..f43c27f9c91dd5 100644 --- a/clap-v3-utils/src/compute_budget.rs +++ b/clap-v3-utils/src/compute_budget.rs @@ -6,7 +6,8 @@ use { pub const COMPUTE_UNIT_PRICE_ARG: ArgConstant<'static> = ArgConstant { name: "compute_unit_price", long: "--with-compute-unit-price", - help: "Set compute unit price for transaction, in increments of 0.000001 lamports per compute unit.", + help: "Set compute unit price for transaction, in increments of 0.000001 lamports per compute \ + unit.", }; pub const COMPUTE_UNIT_LIMIT_ARG: ArgConstant<'static> = ArgConstant { diff --git a/clap-v3-utils/src/fee_payer.rs b/clap-v3-utils/src/fee_payer.rs index 06be013c041e53..f598a110b7fc3b 100644 --- a/clap-v3-utils/src/fee_payer.rs +++ b/clap-v3-utils/src/fee_payer.rs @@ -6,9 +6,9 @@ use { pub const FEE_PAYER_ARG: ArgConstant<'static> = ArgConstant { name: "fee_payer", long: "fee-payer", - help: "Specify the fee-payer account. This may be a keypair file, the ASK keyword \n\ - or the pubkey of an offline signer, provided an appropriate --signer argument \n\ - is also passed. Defaults to the client keypair.", + help: "Specify the fee-payer account. This may be a keypair file, the ASK keyword or the \ + pubkey of an offline signer, provided an appropriate --signer argument is also passed. 
\ + Defaults to the client keypair.", }; #[allow(deprecated)] diff --git a/clap-v3-utils/src/input_parsers/mod.rs b/clap-v3-utils/src/input_parsers/mod.rs index a216cff3bb2260..07601cee984810 100644 --- a/clap-v3-utils/src/input_parsers/mod.rs +++ b/clap-v3-utils/src/input_parsers/mod.rs @@ -9,7 +9,7 @@ use { solana_cluster_type::ClusterType, solana_commitment_config::CommitmentConfig, solana_keypair::{read_keypair_file, Keypair}, - solana_native_token::sol_to_lamports, + solana_native_token::sol_str_to_lamports, solana_pubkey::{Pubkey, MAX_SEED_LEN}, solana_signer::Signer, std::str::FromStr, @@ -18,7 +18,8 @@ use { pub mod signer; #[deprecated( since = "1.17.0", - note = "Please use the functions in `solana_clap_v3_utils::input_parsers::signer` directly instead" + note = "Please use the functions in `solana_clap_v3_utils::input_parsers::signer` directly \ + instead" )] #[allow(deprecated)] pub use signer::{ @@ -80,7 +81,7 @@ pub fn unix_timestamp_from_rfc3339_datetime( )] #[allow(deprecated)] pub fn lamports_of_sol(matches: &ArgMatches, name: &str) -> Option { - value_of(matches, name).map(sol_to_lamports) + matches.value_of(name).and_then(sol_str_to_lamports) } #[deprecated( diff --git a/clap-v3-utils/src/input_validators.rs b/clap-v3-utils/src/input_validators.rs index c2d96d2e965740..0d15c43f2b4bdc 100644 --- a/clap-v3-utils/src/input_validators.rs +++ b/clap-v3-utils/src/input_validators.rs @@ -98,7 +98,9 @@ where // Return an error if a keypair file cannot be parsed #[deprecated( since = "1.18.0", - note = "please use `SignerSourceParserBuilder::default().allow_file_path().allow_prompt().allow_legacy().build()` instead" + note = "please use \ + `SignerSourceParserBuilder::default().allow_file_path().allow_prompt().allow_legacy().\ + build()` instead" )] pub fn is_keypair_or_ask_keyword(string: T) -> Result<(), String> where @@ -115,7 +117,8 @@ where // Return an error if a `SignerSourceKind::Prompt` cannot be parsed #[deprecated( since = "1.18.0", - note = 
"please use `SignerSourceParserBuilder::default().allow_prompt().allow_legacy().build()` instead" + note = "please use \ + `SignerSourceParserBuilder::default().allow_prompt().allow_legacy().build()` instead" )] pub fn is_prompt_signer_source(string: &str) -> Result<(), String> { if string == ASK_KEYWORD { @@ -135,7 +138,9 @@ pub fn is_prompt_signer_source(string: &str) -> Result<(), String> { // Return an error if string cannot be parsed as pubkey string or keypair file location #[deprecated( since = "1.18.0", - note = "please use `SignerSourceParserBuilder::default().allow_pubkey().allow_file_path().build()` instead" + note = "please use \ + `SignerSourceParserBuilder::default().allow_pubkey().allow_file_path().build()` \ + instead" )] #[allow(deprecated)] pub fn is_pubkey_or_keypair(string: T) -> Result<(), String> diff --git a/clap-v3-utils/src/keygen/derivation_path.rs b/clap-v3-utils/src/keygen/derivation_path.rs index 81a95693e1c968..460f25c64a00ee 100644 --- a/clap-v3-utils/src/keygen/derivation_path.rs +++ b/clap-v3-utils/src/keygen/derivation_path.rs @@ -13,9 +13,10 @@ pub fn derivation_path_arg<'a>() -> Arg<'a> { .takes_value(true) .min_values(0) .max_values(1) - .help("Derivation path. All indexes will be promoted to hardened. \ - If arg is not presented then derivation path will not be used. \ - If arg is presented with empty DERIVATION_PATH value then m/44'/501'/0'/0' will be used." + .help( + "Derivation path. All indexes will be promoted to hardened. If arg is not presented \ + then derivation path will not be used. 
If arg is presented with empty \ + DERIVATION_PATH value then m/44'/501'/0'/0' will be used.", ) } diff --git a/clap-v3-utils/src/keygen/mnemonic.rs b/clap-v3-utils/src/keygen/mnemonic.rs index f5eb032dd1cd64..441f0ee2a2c6ff 100644 --- a/clap-v3-utils/src/keygen/mnemonic.rs +++ b/clap-v3-utils/src/keygen/mnemonic.rs @@ -121,15 +121,17 @@ pub fn no_passphrase_and_message() -> (String, String) { pub fn acquire_passphrase_and_message( matches: &ArgMatches, ) -> Result<(String, String), Box> { + #[rustfmt::skip] + const PROMPT: &str = + "\nFor added security, enter a BIP39 passphrase\n\ + \nNOTE! This passphrase improves security of the recovery seed phrase NOT the\n\ + keypair file itself, which is stored as insecure plain text\n\ + \nBIP39 Passphrase (empty for none): "; + if matches.try_contains_id(NO_PASSPHRASE_ARG.name)? { Ok(no_passphrase_and_message()) } else { - match prompt_passphrase( - "\nFor added security, enter a BIP39 passphrase\n\ - \nNOTE! This passphrase improves security of the recovery seed phrase NOT the\n\ - keypair file itself, which is stored as insecure plain text\n\ - \nBIP39 Passphrase (empty for none): ", - ) { + match prompt_passphrase(PROMPT) { Ok(passphrase) => { println!(); Ok((passphrase, " and your BIP39 passphrase".to_string())) diff --git a/clap-v3-utils/src/keypair.rs b/clap-v3-utils/src/keypair.rs index 1eaabe1f6e4915..4022775b88ea7d 100644 --- a/clap-v3-utils/src/keypair.rs +++ b/clap-v3-utils/src/keypair.rs @@ -171,12 +171,11 @@ impl DefaultSigner { } }) .map_err(|_| { - std::io::Error::other( - format!( - "No default signer found, run \"solana-keygen new -o {}\" to create a new one", + std::io::Error::other(format!( + "No default signer found, run \"solana-keygen new -o {}\" to create a new \ + one", self.path - ), - ) + )) })?; *self.is_path_checked.borrow_mut() = true; } @@ -652,9 +651,10 @@ pub fn signer_from_source_with_config( )?)) } SignerSourceKind::Filepath(path) => match read_keypair_file(path) { - Err(e) => 
Err(std::io::Error::other( - format!("could not read keypair file \"{path}\". Run \"solana-keygen new\" to create a keypair file: {e}"), - ) + Err(e) => Err(std::io::Error::other(format!( + "could not read keypair file \"{path}\". Run \"solana-keygen new\" to create a \ + keypair file: {e}" + )) .into()), Ok(file) => Ok(Box::new(file)), }, @@ -680,17 +680,21 @@ pub fn signer_from_source_with_config( } } SignerSourceKind::Pubkey(pubkey) => { - let presigner = try_pubkeys_sigs_of(matches, SIGNER_ARG.name).ok().flatten() + let presigner = try_pubkeys_sigs_of(matches, SIGNER_ARG.name) + .ok() + .flatten() .as_ref() .and_then(|presigners| presigner_from_pubkey_sigs(pubkey, presigners)); if let Some(presigner) = presigner { Ok(Box::new(presigner)) - } else if config.allow_null_signer || matches.try_contains_id(SIGN_ONLY_ARG.name).unwrap_or(false) { + } else if config.allow_null_signer + || matches.try_contains_id(SIGN_ONLY_ARG.name).unwrap_or(false) + { Ok(Box::new(NullSigner::new(pubkey))) } else { - Err(std::io::Error::other( - format!("missing signature for supplied pubkey: {pubkey}"), - ) + Err(std::io::Error::other(format!( + "missing signature for supplied pubkey: {pubkey}" + )) .into()) } } @@ -793,8 +797,8 @@ pub fn resolve_signer_from_source( } SignerSourceKind::Filepath(path) => match read_keypair_file(path) { Err(e) => Err(std::io::Error::other(format!( - "could not read keypair file \"{path}\". \ - Run \"solana-keygen new\" to create a keypair file: {e}" + "could not read keypair file \"{path}\". Run \"solana-keygen new\" to create a \ + keypair file: {e}" )) .into()), Ok(_) => Ok(Some(path.to_string())), @@ -834,7 +838,8 @@ pub const ASK_KEYWORD: &str = "ASK"; pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant { long: "skip-seed-phrase-validation", name: "skip_seed_phrase_validation", - help: "Skip validation of seed phrases. 
Use this if your phrase does not use the BIP39 official English word list", + help: "Skip validation of seed phrases. Use this if your phrase does not use the BIP39 \ + official English word list", }; /// Prompts user for a passphrase and then asks for confirmirmation to check for mistakes @@ -1071,8 +1076,8 @@ fn encodable_key_from_source( )?), SignerSourceKind::Filepath(path) => match K::read_from_file(path) { Err(e) => Err(std::io::Error::other(format!( - "could not read keypair file \"{path}\". \ - Run \"solana-keygen new\" to create a keypair file: {e}" + "could not read keypair file \"{path}\". Run \"solana-keygen new\" to create a \ + keypair file: {e}" )) .into()), Ok(file) => Ok(file), @@ -1152,7 +1157,8 @@ fn encodable_key_from_seed_phrase( let seed_phrase = prompt_password(format!("[{key_name}] seed phrase: "))?; let seed_phrase = seed_phrase.trim(); let passphrase_prompt = format!( - "[{key_name}] If this seed phrase has an associated passphrase, enter it now. Otherwise, press ENTER to continue: ", + "[{key_name}] If this seed phrase has an associated passphrase, enter it now. Otherwise, \ + press ENTER to continue: ", ); let key = if skip_validation { @@ -1349,7 +1355,7 @@ mod tests { legacy: false, }; - let signer_arg = format!("{}={}", pubkey, signature); + let signer_arg = format!("{pubkey}={signature}"); let clap_app = Command::new("test").arg( Arg::new(SIGNER_ARG.name) @@ -1390,7 +1396,7 @@ mod tests { legacy: false, }; - let signer_arg = format!("{}={}", pubkey, signature); + let signer_arg = format!("{pubkey}={signature}"); let clap_app = Command::new("test").arg( Arg::new(SIGNER_ARG.name) diff --git a/clap-v3-utils/src/nonce.rs b/clap-v3-utils/src/nonce.rs index 4c7cd0ff24ab20..4efc0470712184 100644 --- a/clap-v3-utils/src/nonce.rs +++ b/clap-v3-utils/src/nonce.rs @@ -9,7 +9,7 @@ pub const NONCE_ARG: ArgConstant<'static> = ArgConstant { help: "Provide the nonce account to use when creating a nonced \n\ transaction. 
Nonced transactions are useful when a transaction \n\ requires a lengthy signing process. Learn more about nonced \n\ - transactions at https://docs.solanalabs.com/cli/examples/durable-nonce", + transactions at https://docs.anza.xyz/cli/examples/durable-nonce", }; pub const NONCE_AUTHORITY_ARG: ArgConstant<'static> = ArgConstant { diff --git a/cli-config/Cargo.toml b/cli-config/Cargo.toml index dd322b07b68148..abe7a7e382164d 100644 --- a/cli-config/Cargo.toml +++ b/cli-config/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "solana-cli-config" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-cli-config" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } diff --git a/cli-output/Cargo.toml b/cli-output/Cargo.toml index f8a6b403636f10..8954b83b38626c 100644 --- a/cli-output/Cargo.toml +++ b/cli-output/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "solana-cli-output" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-cli-output" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -34,7 +34,6 @@ solana-clock = { workspace = true } solana-epoch-info = { workspace = true, features = ["serde"] } solana-hash = { workspace = true } solana-message = { workspace = true } -solana-native-token = { workspace = true } solana-packet = { workspace = true } solana-pubkey = { workspace = true } solana-rpc-client-api = { workspace = true } @@ -42,7 +41,6 @@ solana-sdk-ids = { workspace = true } solana-signature = { workspace = true } solana-stake-interface = { workspace = true } solana-system-interface = { workspace = true } -solana-sysvar = { workspace = true } solana-transaction = { workspace = true, features = ["verify"] } 
solana-transaction-error = { workspace = true } solana-transaction-status = { workspace = true } diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index a34af79a43d211..b0e2e6676c27dc 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -25,15 +25,16 @@ use { solana_clock::{Epoch, Slot, UnixTimestamp}, solana_epoch_info::EpochInfo, solana_hash::Hash, - solana_native_token::lamports_to_sol, solana_pubkey::Pubkey, solana_rpc_client_api::response::{ RpcAccountBalance, RpcContactInfo, RpcInflationGovernor, RpcInflationRate, RpcKeyedAccount, RpcSupply, RpcVoteAccountInfo, }, solana_signature::Signature, - solana_stake_interface::state::{Authorized, Lockup}, - solana_sysvar::stake_history::StakeHistoryEntry, + solana_stake_interface::{ + stake_history::StakeHistoryEntry, + state::{Authorized, Lockup}, + }, solana_transaction::{versioned::VersionedTransaction, Transaction}, solana_transaction_status::{ EncodedConfirmedBlock, EncodedTransaction, TransactionConfirmationStatus, @@ -117,9 +118,9 @@ impl VerboseDisplay for CliPrioritizationFeeStats { fn write_str(&self, f: &mut dyn std::fmt::Write) -> fmt::Result { writeln!(f, "{:<11} prioritization_fee", "slot")?; for fee in &self.fees { - write!(f, "{}", fee)?; + write!(f, "{fee}")?; } - write!(f, "{}", self) + write!(f, "{self}") } } @@ -1077,8 +1078,8 @@ impl VerboseDisplay for CliKeyedEpochRewards { keyed_reward.address, reward.effective_slot, Utc.timestamp_opt(reward.block_time, 0).unwrap(), - lamports_to_sol(reward.amount), - lamports_to_sol(reward.post_balance), + build_balance_message(reward.amount, false, false), + build_balance_message(reward.post_balance, false, false), reward.percent_change, reward .apr @@ -1122,8 +1123,8 @@ impl fmt::Display for CliKeyedEpochRewards { f, " {:<44} ◎{:<17.9} ◎{:<17.9} {:>13.9}% {:>7} {:>10}", keyed_reward.address, - lamports_to_sol(reward.amount), - lamports_to_sol(reward.post_balance), + build_balance_message(reward.amount, false, 
false), + build_balance_message(reward.post_balance, false, false), reward.percent_change, reward .apr @@ -1298,13 +1299,13 @@ fn show_epoch_rewards( format_as!( f, "{},{},{},{},{},{}%,{},{}", - " {:<6} {:<11} {:<26} ◎{:<17.9} ◎{:<17.9} {:>13.3}% {:>14} {:>10}", + " {:<6} {:<11} {:<26} ◎{:<17.11} ◎{:<17.11} {:>13.3}% {:>14} {:>10}", fmt, reward.epoch, reward.effective_slot, Utc.timestamp_opt(reward.block_time, 0).unwrap(), - lamports_to_sol(reward.amount), - lamports_to_sol(reward.post_balance), + build_balance_message(reward.amount, false, false), + build_balance_message(reward.post_balance, false, false), reward.percent_change, reward .apr @@ -2025,7 +2026,10 @@ impl fmt::Display for CliAccountBalances { f, "{:<44} {}", account.address, - &format!("{} SOL", lamports_to_sol(account.lamports)) + &format!( + "{} SOL", + build_balance_message(account.lamports, false, false) + ), )?; } Ok(()) @@ -2060,16 +2064,26 @@ impl VerboseDisplay for CliSupply {} impl fmt::Display for CliSupply { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - writeln_name_value(f, "Total:", &format!("{} SOL", lamports_to_sol(self.total)))?; + writeln_name_value( + f, + "Total:", + &format!("{} SOL", build_balance_message(self.total, false, false)), + )?; writeln_name_value( f, "Circulating:", - &format!("{} SOL", lamports_to_sol(self.circulating)), + &format!( + "{} SOL", + build_balance_message(self.circulating, false, false) + ), )?; writeln_name_value( f, "Non-Circulating:", - &format!("{} SOL", lamports_to_sol(self.non_circulating)), + &format!( + "{} SOL", + build_balance_message(self.non_circulating, false, false) + ), )?; if self.print_accounts { writeln!(f)?; @@ -2835,14 +2849,14 @@ impl fmt::Display for CliBlock { format!( "{}◎{:<14.9}", sign, - lamports_to_sol(reward.lamports.unsigned_abs()) + build_balance_message(reward.lamports.unsigned_abs(), false, false) ), if reward.post_balance == 0 { " - -".to_string() } else { format!( "◎{:<19.9} {:>13.9}%", - 
lamports_to_sol(reward.post_balance), + build_balance_message(reward.post_balance, false, false), (reward.lamports.abs() as f64 / (reward.post_balance as f64 - reward.lamports as f64)) * 100.0 @@ -2860,7 +2874,7 @@ impl fmt::Display for CliBlock { f, "Total Rewards: {}◎{:<12.9}", sign, - lamports_to_sol(total_rewards.unsigned_abs()) + build_balance_message(total_rewards.unsigned_abs(), false, false) )?; } for (index, transaction_with_meta) in @@ -3527,13 +3541,42 @@ mod tests { recent_timestamp: BlockTimestamp::default(), ..CliVoteAccount::default() }; + #[rustfmt::skip] + let expected_output_common = + "Account Balance: 0.00001 SOL\n\ + Validator Identity: 11111111111111111111111111111111\n\ + Vote Authority: None\n\ + Withdraw Authority: \n\ + Credits: 0\n\ + Commission: 0%\n\ + Root Slot: ~\n\ + Recent Timestamp: 1970-01-01T00:00:00Z from slot 0\n"; + let s = format!("{c}"); - assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: None\nWithdraw Authority: \nCredits: 0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\n Epoch Reward Slot Time Amount New Balance Percent Change APR Commission\n 1 100 1970-01-01 00:00:00 UTC ◎0.000000010 ◎0.000000100 11.000% 10.00% 1%\n 2 200 1970-01-12 13:46:40 UTC ◎0.000000012 ◎0.000000100 11.000% 13.00% 1%\n"); + #[rustfmt::skip] + let expected_epoch_rewards_output = + "Epoch Rewards:\n \ + Epoch Reward Slot Time Amount New Balance Percent Change APR Commission\n \ + 1 100 1970-01-01 00:00:00 UTC ◎0.00000001 ◎0.0000001 11.000% 10.00% 1%\n \ + 2 200 1970-01-12 13:46:40 UTC ◎0.000000012 ◎0.0000001 11.000% 13.00% 1%\n"; + assert_eq!( + s, + format!("{expected_output_common}{expected_epoch_rewards_output}") + ); println!("{s}"); c.use_csv = true; let s = format!("{c}"); - assert_eq!(s, "Account Balance: 0.00001 SOL\nValidator Identity: 11111111111111111111111111111111\nVote Authority: None\nWithdraw Authority: \nCredits: 
0\nCommission: 0%\nRoot Slot: ~\nRecent Timestamp: 1970-01-01T00:00:00Z from slot 0\nEpoch Rewards:\nEpoch,Reward Slot,Time,Amount,New Balance,Percent Change,APR,Commission\n1,100,1970-01-01 00:00:00 UTC,0.00000001,0.0000001,11%,10.00%,1%\n2,200,1970-01-12 13:46:40 UTC,0.000000012,0.0000001,11%,13.00%,1%\n"); + #[rustfmt::skip] + let expected_epoch_rewards_output = + "Epoch Rewards:\n\ + Epoch,Reward Slot,Time,Amount,New Balance,Percent Change,APR,Commission\n\ + 1,100,1970-01-01 00:00:00 UTC,0.00000001,0.0000001,11%,10.00%,1%\n\ + 2,200,1970-01-12 13:46:40 UTC,0.000000012,0.0000001,11%,13.00%,1%\n"; + assert_eq!( + s, + format!("{expected_output_common}{expected_epoch_rewards_output}") + ); println!("{s}"); } } diff --git a/cli-output/src/display.rs b/cli-output/src/display.rs index 115471b76fad7f..d31fb4ab3ae323 100644 --- a/cli-output/src/display.rs +++ b/cli-output/src/display.rs @@ -10,7 +10,6 @@ use { solana_clock::UnixTimestamp, solana_hash::Hash, solana_message::{compiled_instruction::CompiledInstruction, v0::MessageAddressTableLookup}, - solana_native_token::lamports_to_sol, solana_pubkey::Pubkey, solana_signature::Signature, solana_stake_interface as stake, @@ -52,7 +51,8 @@ pub fn build_balance_message_with_config( let value = if config.use_lamports_unit { lamports.to_string() } else { - let sol = lamports_to_sol(lamports); + const LAMPORTS_PER_SOL_F64: f64 = 1_000_000_000.; + let sol = lamports as f64 / LAMPORTS_PER_SOL_F64; let sol_str = format!("{sol:.9}"); if config.trim_trailing_zeros { sol_str @@ -530,8 +530,8 @@ fn write_rewards( "-".to_string() }, sign, - lamports_to_sol(reward.lamports.unsigned_abs()), - lamports_to_sol(reward.post_balance) + build_balance_message(reward.lamports.unsigned_abs(), false, false), + build_balance_message(reward.post_balance, false, false) )?; } } @@ -556,7 +556,12 @@ fn write_status( } fn write_fees(w: &mut W, transaction_fee: u64, prefix: &str) -> io::Result<()> { - writeln!(w, "{} Fee: ◎{}", prefix, 
lamports_to_sol(transaction_fee)) + writeln!( + w, + "{} Fee: ◎{}", + prefix, + build_balance_message(transaction_fee, false, false) + ) } fn write_balances( @@ -580,7 +585,7 @@ fn write_balances( "{} Account {} balance: ◎{}", prefix, i, - lamports_to_sol(*pre) + build_balance_message(*pre, false, false) )?; } else { writeln!( @@ -588,8 +593,8 @@ fn write_balances( "{} Account {} balance: ◎{} -> ◎{}", prefix, i, - lamports_to_sol(*pre), - lamports_to_sol(*post) + build_balance_message(*pre, false, false), + build_balance_message(*post, false, false) )?; } } @@ -740,7 +745,7 @@ mod test { let secret = ed25519_dalek::SecretKey::from_bytes(&[0u8; 32]).unwrap(); let public = ed25519_dalek::PublicKey::from(&secret); let keypair = ed25519_dalek::Keypair { secret, public }; - Keypair::from_bytes(&keypair.to_bytes()).unwrap() + Keypair::try_from(keypair.to_bytes().as_ref()).unwrap() } fn new_test_v0_transaction() -> VersionedTransaction { @@ -857,7 +862,7 @@ Return Data from Program 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR: 0000: 01 02 03 ... Rewards: Address Type Amount New Balance \0 - 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi rent -◎0.000000100 ◎0.000009900 \0 + 4vJ9JU1bJJE96FWSJKvHsmmFADCg4gpZQff4P3bkLKi rent -◎0.0000001 ◎0.0000099 \0 ".replace("\\0", "") // replace marker used to subvert trailing whitespace linter on CI ); } @@ -946,7 +951,7 @@ Return Data from Program 8qbHbw2BbbTHBW1sbeqakYXVKRQM8Ne7pLK7m6CVfeR: 0000: 01 02 03 ... 
Rewards: Address Type Amount New Balance \0 - CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8 rent -◎0.000000100 ◎0.000014900 \0 + CktRuQ2mttgRGkXJtyksdKHjUdc2C4TgDzyB98oEzy8 rent -◎0.0000001 ◎0.0000149 \0 ".replace("\\0", "") // replace marker used to subvert trailing whitespace linter on CI ); } diff --git a/cli/Cargo.toml b/cli/Cargo.toml index df533962f0f026..da2d60b31f155c 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "solana-cli" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-cli" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -16,6 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] name = "solana" path = "src/main.rs" +[features] +dev-context-only-utils = ["solana-faucet/dev-context-only-utils"] + [dependencies] agave-feature-set = { workspace = true } agave-syscalls = { workspace = true } @@ -32,82 +35,82 @@ humantime = { workspace = true } log = { workspace = true } num-traits = { workspace = true } pretty-hex = { workspace = true } -reqwest = { workspace = true, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } +reqwest = { workspace = true, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "rustls-tls-native-roots", "json"] } semver = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } serde_json = { workspace = true } -solana-account = "=2.2.1" +solana-account = "=3.0.0" solana-account-decoder = { workspace = true } solana-address-lookup-table-interface = { workspace = true } -solana-borsh = "=2.2.1" +solana-borsh = "=3.0.0" solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } solana-cli-output = { workspace = true } solana-client = { workspace = true } -solana-clock = "=2.2.2" -solana-cluster-type = "=2.2.1" 
-solana-commitment-config = "=2.2.1" -solana-compute-budget-interface = { version = "=2.2.2", features = ["borsh"] } -solana-config-interface = "=1.0.0" -solana-config-program-client = { workspace = true, features = ["serde"] } +solana-clock = "=3.0.0" +solana-cluster-type = "=3.0.0" +solana-commitment-config = "=3.0.0" +solana-compute-budget-interface = { version = "=3.0.0", features = ["borsh"] } +solana-config-interface = "=2.0.0" solana-connection-cache = { workspace = true } -solana-epoch-schedule = "=2.2.1" -solana-feature-gate-client = "=0.0.2" -solana-feature-gate-interface = "=2.2.2" -solana-fee-calculator = "=2.2.1" -solana-fee-structure = "=2.3.0" -solana-hash = "=2.3.0" -solana-instruction = "=2.3.0" -solana-keypair = "=2.2.1" -solana-loader-v3-interface = { version = "=5.0.0", features = ["bincode"] } -solana-loader-v4-interface = "=2.2.1" +solana-epoch-schedule = "=3.0.0" +solana-feature-gate-interface = { version = "=3.0.0", features = ["bincode"] } +solana-fee-calculator = "=3.0.0" +solana-fee-structure = "=3.0.0" +solana-hash = "=3.0.0" +solana-instruction = "=3.0.0" +solana-keypair = "=3.0.1" +solana-loader-v3-interface = { version = "=6.1.0", features = ["bincode"] } +solana-loader-v4-interface = "=3.1.0" solana-loader-v4-program = { workspace = true } -solana-logger = "=2.3.1" -solana-message = "=2.4.0" -solana-native-token = "=2.2.2" -solana-nonce = "=2.2.1" -solana-offchain-message = { version = "=2.2.1", features = ["verify"] } -solana-packet = "=2.2.1" +solana-logger = "=3.0.0" +solana-message = "=3.0.1" +solana-native-token = "=3.0.0" +solana-nonce = "=3.0.0" +solana-offchain-message = { version = "=3.0.0", features = ["verify"] } +solana-packet = "=3.0.0" solana-program-runtime = { workspace = true } -solana-pubkey = { version = "=2.4.0", default-features = false } +solana-pubkey = { version = "=3.0.0", default-features = false } solana-pubsub-client = { workspace = true } solana-quic-client = { workspace = true } solana-remote-wallet = { 
workspace = true, features = ["default"] } -solana-rent = "=2.2.1" +solana-rent = "=3.0.0" solana-rpc-client = { workspace = true, features = ["default"] } solana-rpc-client-api = { workspace = true } solana-rpc-client-nonce-utils = { workspace = true, features = ["clap"] } -solana-sbpf = { workspace = true } -solana-sdk-ids = "=2.2.1" -solana-signature = { version = "=2.3.0", default-features = false } -solana-signer = "=2.2.1" -solana-slot-history = "=2.2.1" -solana-stake-interface = "=1.2.1" +solana-sbpf = { workspace = true, features = ["jit"] } +solana-sdk-ids = "=3.0.0" +solana-signature = { version = "=3.1.0", default-features = false } +solana-signer = "=3.0.0" +solana-slot-history = "=3.0.0" +solana-stake-interface = "=2.0.1" solana-streamer = { workspace = true } -solana-system-interface = { version = "=1.0", features = ["bincode"] } -solana-sysvar = "=2.2.2" +solana-system-interface = { version = "=2.0", features = ["bincode"] } +solana-sysvar = "=3.0.0" solana-tps-client = { workspace = true } solana-tpu-client = { workspace = true, features = ["default"] } -solana-transaction = "=2.2.3" -solana-transaction-error = "=2.2.1" +solana-transaction = "=3.0.1" +solana-transaction-error = "=3.0.0" solana-transaction-status = { workspace = true } solana-transaction-status-client-types = { workspace = true } solana-udp-client = { workspace = true } solana-version = { workspace = true } solana-vote-program = { workspace = true } -spl-memo-interface = { version = "=1.0.0" } +spl-memo-interface = { version = "=2.0.0" } thiserror = { workspace = true } tiny-bip39 = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } -solana-faucet = { workspace = true } +solana-client = { workspace = true, features = ["dev-context-only-utils"] } +solana-faucet = { workspace = true, features = ["dev-context-only-utils"] } solana-nonce-account = { workspace = true } solana-presigner = { workspace = true } solana-rpc = { workspace = true } solana-sha256-hasher 
= { workspace = true } solana-streamer = { workspace = true } solana-test-validator = { workspace = true } +solana-tps-client = { workspace = true, features = ["dev-context-only-utils"] } tempfile = { workspace = true } test-case = { workspace = true } diff --git a/cli/src/address_lookup_table.rs b/cli/src/address_lookup_table.rs index cb5f1a45b4c0aa..437d15d38bd01c 100644 --- a/cli/src/address_lookup_table.rs +++ b/cli/src/address_lookup_table.rs @@ -78,8 +78,8 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .validator(is_pubkey_or_keypair) .help( - "Lookup table authority address \ - [default: the default configured keypair].", + "Lookup table authority address [default: the default \ + configured keypair].", ), ) .arg( @@ -113,8 +113,8 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .help( - "Lookup table authority \ - [default: the default configured keypair]", + "Lookup table authority [default: the default configured \ + keypair]", ), ) .arg( @@ -143,8 +143,8 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .help( - "Lookup table authority \ - [default: the default configured keypair]", + "Lookup table authority [default: the default configured \ + keypair]", ), ) .arg( @@ -187,8 +187,8 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .help( - "Lookup table authority \ - [default: the default configured keypair]", + "Lookup table authority [default: the default configured \ + keypair]", ), ) .arg( @@ -227,8 +227,8 @@ impl AddressLookupTableSubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .help( - "Lookup table authority \ - [default: the default configured keypair]", + "Lookup table authority [default: the default configured \ + keypair]", ), ), ) diff --git a/cli/src/checks.rs b/cli/src/checks.rs index 7a4b64ee1f55da..01805429406831 100644 --- 
a/cli/src/checks.rs +++ b/cli/src/checks.rs @@ -1,6 +1,6 @@ use { - crate::cli::CliError, solana_commitment_config::CommitmentConfig, solana_message::Message, - solana_native_token::lamports_to_sol, solana_pubkey::Pubkey, + crate::cli::CliError, solana_cli_output::display::build_balance_message, + solana_commitment_config::CommitmentConfig, solana_message::Message, solana_pubkey::Pubkey, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::client_error::Result as ClientResult, }; @@ -83,8 +83,8 @@ pub fn check_account_for_spend_and_fee_with_commitment( balance .checked_add(fee) .ok_or(CliError::InsufficientFundsForSpendAndFee( - lamports_to_sol(balance), - lamports_to_sol(fee), + build_balance_message(balance, false, false), + build_balance_message(fee, false, false), *account_pubkey, ))?; @@ -96,13 +96,13 @@ pub fn check_account_for_spend_and_fee_with_commitment( )? { if balance > 0 { return Err(CliError::InsufficientFundsForSpendAndFee( - lamports_to_sol(balance), - lamports_to_sol(fee), + build_balance_message(balance, false, false), + build_balance_message(fee, false, false), *account_pubkey, )); } else { return Err(CliError::InsufficientFundsForFee( - lamports_to_sol(fee), + build_balance_message(fee, false, false), *account_pubkey, )); } diff --git a/cli/src/clap_app.rs b/cli/src/clap_app.rs index 3f62ba48abb323..3f17268b7ef5cd 100644 --- a/cli/src/clap_app.rs +++ b/cli/src/clap_app.rs @@ -44,8 +44,8 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A .global(true) .validator(is_url_or_moniker) .help( - "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]", + "URL for Solana's JSON RPC or moniker (or their first letter): [mainnet-beta, \ + testnet, devnet, localhost]", ), ) .arg( @@ -84,8 +84,8 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A .hide_possible_values(true) .global(true) .help( - "Return information at the 
selected commitment level \ - [possible values: processed, confirmed, finalized]", + "Return information at the selected commitment level [possible values: \ + processed, confirmed, finalized]", ), ) .arg( diff --git a/cli/src/cli.rs b/cli/src/cli.rs index e19965175979e2..5241f3368da7a8 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1,3 +1,7 @@ +#[cfg(not(feature = "dev-context-only-utils"))] +use solana_tps_client::utils::create_connection_cache; +#[cfg(feature = "dev-context-only-utils")] +use solana_tps_client::utils::create_connection_cache_for_tests; use { crate::{ address_lookup_table::*, clap_app::*, cluster_query::*, feature::*, inflation::*, nonce::*, @@ -30,7 +34,7 @@ use { solana_signature::Signature, solana_signer::{Signer, SignerError}, solana_stake_interface::{instruction::LockupArgs, state::Lockup}, - solana_tps_client::{utils::create_connection_cache, TpsClient}, + solana_tps_client::TpsClient, solana_tpu_client::tpu_client::{ TpuClient, TpuClientConfig, DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, }, @@ -488,11 +492,11 @@ pub enum CliError { #[error("Command not recognized: {0}")] CommandNotRecognized(String), #[error("Account {1} has insufficient funds for fee ({0} SOL)")] - InsufficientFundsForFee(f64, Pubkey), + InsufficientFundsForFee(String, Pubkey), #[error("Account {1} has insufficient funds for spend ({0} SOL)")] - InsufficientFundsForSpend(f64, Pubkey), + InsufficientFundsForSpend(String, Pubkey), #[error("Account {2} has insufficient funds for spend ({0} SOL) + fee ({1} SOL)")] - InsufficientFundsForSpendAndFee(f64, f64, Pubkey), + InsufficientFundsForSpendAndFee(String, String, Pubkey), #[error(transparent)] InvalidNonce(solana_rpc_client_nonce_utils::Error), #[error("Dynamic program error: {0}")] @@ -718,11 +722,11 @@ pub fn parse_command( ("delegate-stake", Some(matches)) => { parse_stake_delegate_stake(matches, default_signer, wallet_manager) } - ("redelegate-stake", _) => { - Err(CliError::CommandNotRecognized( - 
"`redelegate-stake` no longer exists and will be completely removed in a future release".to_string(), - )) - } + ("redelegate-stake", _) => Err(CliError::CommandNotRecognized( + "`redelegate-stake` no longer exists and will be completely removed in a future \ + release" + .to_string(), + )), ("withdraw-stake", Some(matches)) => { parse_stake_withdraw_stake(matches, default_signer, wallet_manager) } @@ -944,6 +948,15 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => { let client_dyn: Arc = if config.use_tpu_client { let keypair = read_keypair_file(&config.keypair_path).unwrap_or(Keypair::new()); + #[cfg(feature = "dev-context-only-utils")] + let connection_cache = create_connection_cache_for_tests( + DEFAULT_TPU_CONNECTION_POOL_SIZE, + config.use_quic, + "127.0.0.1".parse().unwrap(), + Some(&keypair), + rpc_client.clone(), + ); + #[cfg(not(feature = "dev-context-only-utils"))] let connection_cache = create_connection_cache( DEFAULT_TPU_CONNECTION_POOL_SIZE, config.use_quic, @@ -1744,6 +1757,26 @@ where } } +pub fn to_str_error_adapter(ix_error: &InstructionError) -> Option +where + E: 'static + std::error::Error + std::convert::TryFrom, +{ + match ix_error { + InstructionError::Custom(code) => E::try_from(*code).ok(), + _ => None, + } +} + +pub fn log_instruction_custom_error_to_str( + result: ClientResult, + config: &CliConfig, +) -> ProcessResult +where + E: 'static + std::error::Error + std::convert::TryFrom, +{ + log_instruction_custom_error_ex::(result, &config.output_format, to_str_error_adapter) +} + pub fn log_instruction_custom_error( result: ClientResult, config: &CliConfig, @@ -1760,7 +1793,7 @@ pub fn log_instruction_custom_error_ex( error_adapter: F, ) -> ProcessResult where - E: 'static + std::error::Error + FromPrimitive, + E: 'static + std::error::Error, F: Fn(&InstructionError) -> Option, { match result { diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index ff8392f9fcf013..0a8def6242bde7 100644 --- 
a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -31,7 +31,6 @@ use { solana_commitment_config::CommitmentConfig, solana_hash::Hash, solana_message::Message, - solana_native_token::lamports_to_sol, solana_nonce::state::State as NonceState, solana_pubkey::Pubkey, solana_pubsub_client::pubsub_client::PubsubClient, @@ -106,8 +105,8 @@ impl ClusterQuerySubCommands for App<'_, '_> { .multiple(true) .index(1) .help( - "A list of accounts which if provided the fee response will represent\ - the fee to land a transaction with those accounts as writable", + "A list of accounts which if provided the fee response will represent \ + the fee to land a transaction with those accounts as writable", ), ) .arg( @@ -302,8 +301,7 @@ impl ClusterQuerySubCommands for App<'_, '_> { .about("Stream transaction logs") .arg(pubkey!( Arg::with_name("address").index(1).value_name("ADDRESS"), - "Account to monitor \ - [default: monitor all transactions except for votes]." + "Account to monitor [default: monitor all transactions except for votes]." )) .arg( Arg::with_name("include_votes") @@ -328,8 +326,8 @@ impl ClusterQuerySubCommands for App<'_, '_> { .long("slot-limit") .takes_value(true) .help( - "Limit results to this many slots from the end of the epoch \ - [default: full epoch]", + "Limit results to this many slots from the end of the epoch [default: \ + full epoch]", ), ), ) @@ -352,8 +350,8 @@ impl ClusterQuerySubCommands for App<'_, '_> { .index(1) .value_name("VALIDATOR_ACCOUNT_PUBKEYS") .multiple(true), - "Only show stake accounts delegated to the provided pubkeys. \ - Accepts both vote and identity pubkeys." + "Only show stake accounts delegated to the provided pubkeys. Accepts both \ + vote and identity pubkeys." 
)) .arg(pubkey!( Arg::with_name("withdraw_authority") @@ -1413,7 +1411,10 @@ pub fn process_supply( pub fn process_total_supply(rpc_client: &RpcClient, _config: &CliConfig) -> ProcessResult { let supply = rpc_client.supply()?.value; - Ok(format!("{} SOL", lamports_to_sol(supply.total))) + Ok(format!( + "{} SOL", + build_balance_message(supply.total, false, false) + )) } pub fn process_get_transaction_count(rpc_client: &RpcClient, _config: &CliConfig) -> ProcessResult { @@ -1838,8 +1839,7 @@ pub fn process_show_stakes( if !pubkeys.is_empty() { return Err(CliError::RpcRequestError(format!( - "Failed to retrieve matching vote account for {:?}.", - pubkeys + "Failed to retrieve matching vote account for {pubkeys:?}." )) .into()); } @@ -2295,7 +2295,10 @@ pub fn process_calculate_rent( use_lamports_unit: bool, ) -> ProcessResult { if data_length > MAX_PERMITTED_DATA_LENGTH.try_into().unwrap() { - eprintln!("Warning: Maximum account size is {MAX_PERMITTED_DATA_LENGTH} bytes, {data_length} provided"); + eprintln!( + "Warning: Maximum account size is {MAX_PERMITTED_DATA_LENGTH} bytes, {data_length} \ + provided" + ); } let rent_account = rpc_client.get_account(&sysvar::rent::id())?; let rent: Rent = rent_account.deserialize_data()?; diff --git a/cli/src/compute_budget.rs b/cli/src/compute_budget.rs index 3a19adfc846419..1519fd9913d7d8 100644 --- a/cli/src/compute_budget.rs +++ b/cli/src/compute_budget.rs @@ -98,10 +98,19 @@ pub(crate) fn simulate_and_update_compute_unit_limit( }; match compute_unit_limit { - ComputeUnitLimit::Simulated => { - let compute_unit_limit = + ComputeUnitLimit::Simulated | ComputeUnitLimit::SimulatedWithExtraPercentage(_) => { + let base_compute_unit_limit = simulate_for_compute_unit_limit_unchecked(rpc_client, message)?; + let compute_unit_limit = + if let ComputeUnitLimit::SimulatedWithExtraPercentage(n) = compute_unit_limit { + (base_compute_unit_limit as u64) + .saturating_mul(100_u64.saturating_add(*n as u64)) + .saturating_div(100) as u32 + 
} else { + base_compute_unit_limit + }; + // Overwrite the compute unit limit instruction with the actual units consumed message.instructions[compute_unit_limit_ix_index].data = ComputeBudgetInstruction::set_compute_unit_limit(compute_unit_limit).data; @@ -138,7 +147,7 @@ impl WithComputeUnitConfig for Vec { compute_unit_limit, )); } - ComputeUnitLimit::Simulated => { + ComputeUnitLimit::Simulated | ComputeUnitLimit::SimulatedWithExtraPercentage(_) => { // Default to the max compute unit limit because later transactions will be // simulated to get the exact compute units consumed. self.push(ComputeBudgetInstruction::set_compute_unit_limit( diff --git a/cli/src/feature.rs b/cli/src/feature.rs index 432fd06ba37800..2f0cfbc115b2fc 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -1,8 +1,8 @@ use { crate::{ cli::{ - log_instruction_custom_error, CliCommand, CliCommandInfo, CliConfig, CliError, - ProcessResult, + log_instruction_custom_error, log_instruction_custom_error_to_str, CliCommand, + CliCommandInfo, CliConfig, CliError, ProcessResult, }, spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, }, @@ -19,10 +19,10 @@ use { solana_clock::{Epoch, Slot}, solana_cluster_type::ClusterType, solana_epoch_schedule::EpochSchedule, - solana_feature_gate_client::{ - errors::SolanaFeatureGateError, instructions::RevokePendingActivation, + solana_feature_gate_interface::{ + activate_with_lamports, error::FeatureGateError, from_account, + instruction::revoke_pending_activation, Feature, }, - solana_feature_gate_interface::{activate_with_lamports, from_account, Feature}, solana_message::Message, solana_pubkey::Pubkey, solana_remote_wallet::remote_wallet::RemoteWalletManager, @@ -31,7 +31,6 @@ use { client_error::Error as ClientError, request::MAX_MULTIPLE_ACCOUNTS, response::RpcVoteAccountInfo, }, - solana_sdk_ids::{incinerator, system_program}, solana_system_interface::error::SystemError, solana_transaction::Transaction, std::{cmp::Ordering, 
collections::HashMap, fmt, rc::Rc, str::FromStr}, @@ -1086,12 +1085,7 @@ fn process_revoke( ComputeUnitLimit::Default, |_lamports| { Message::new( - &[RevokePendingActivation { - feature: feature_id, - incinerator: incinerator::id(), - system_program: system_program::id(), - } - .instruction()], + &[revoke_pending_activation(&feature_id)], Some(&fee_payer.pubkey()), ) }, @@ -1110,5 +1104,5 @@ fn process_revoke( config.commitment, config.send_transaction_config, ); - log_instruction_custom_error::(result, config) + log_instruction_custom_error_to_str::(result, config) } diff --git a/cli/src/program.rs b/cli/src/program.rs index 7c453cdeb841ab..76bbbbdc8495b5 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -234,9 +234,10 @@ impl ProgramSubCommands for App<'_, '_> { Arg::with_name("program_id") .long("program-id") .value_name("PROGRAM_ID"), - "Executable program; must be a signer for initial deploys, \ - can be an address for upgrades [default: address of keypair at \ - /path/to/program-keypair.json if present, otherwise a random address]." + "Executable program; must be a signer for initial deploys, can be an \ + address for upgrades [default: address of keypair at \ + /path/to/program-keypair.json if present, otherwise a random \ + address]." )) .arg( Arg::with_name("final") @@ -250,8 +251,8 @@ impl ProgramSubCommands for App<'_, '_> { .takes_value(true) .required(false) .help( - "Maximum length of the upgradeable program \ - [default: the length of the original deployed program]", + "Maximum length of the upgradeable program [default: the \ + length of the original deployed program]", ), ) .arg( @@ -272,20 +273,20 @@ impl ProgramSubCommands for App<'_, '_> { .default_value("5") .help( "Maximum number of attempts to sign or resign transactions \ - after blockhash expiration. 
\ - If any transactions sent during the program deploy are still \ - unconfirmed after the initially chosen recent blockhash \ - expires, those transactions will be resigned with a new \ - recent blockhash and resent. Use this setting to adjust \ - the maximum number of transaction signing iterations. Each \ - blockhash is valid for about 60 seconds, which means using \ - the default value of 5 will lead to sending transactions \ - for at least 5 minutes or until all transactions are confirmed,\ - whichever comes first.", + after blockhash expiration. If any transactions sent during \ + the program deploy are still unconfirmed after the initially \ + chosen recent blockhash expires, those transactions will be \ + resigned with a new recent blockhash and resent. Use this \ + setting to adjust the maximum number of transaction signing \ + iterations. Each blockhash is valid for about 60 seconds, \ + which means using the default value of 5 will lead to \ + sending transactions for at least 5 minutes or until all \ + transactions are confirmed,whichever comes first.", ), ) .arg(Arg::with_name("use_rpc").long("use-rpc").help( - "Send write transactions to the configured RPC instead of validator TPUs", + "Send write transactions to the configured RPC instead of validator \ + TPUs", )) .arg(compute_unit_price_arg()) .arg( @@ -298,9 +299,12 @@ impl ProgramSubCommands for App<'_, '_> { Arg::with_name("skip_feature_verify") .long("skip-feature-verify") .takes_value(false) - .help("Don't verify program against the activated feature set. \ - This setting means a program containing a syscall not yet active on \ - mainnet will succeed local verification, but fail during the last step of deployment.") + .help( + "Don't verify program against the activated feature set. 
This \ + setting means a program containing a syscall not yet active \ + on mainnet will succeed local verification, but fail during \ + the last step of deployment.", + ), ), ) .subcommand( @@ -335,9 +339,12 @@ impl ProgramSubCommands for App<'_, '_> { Arg::with_name("skip_feature_verify") .long("skip-feature-verify") .takes_value(false) - .help("Don't verify program against the activated feature set. \ - This setting means a program containing a syscall not yet active on \ - mainnet will succeed local verification, but fail during the last step of deployment.") + .help( + "Don't verify program against the activated feature set. This \ + setting means a program containing a syscall not yet active \ + on mainnet will succeed local verification, but fail during \ + the last step of deployment.", + ), ) .offline_args(), ) @@ -378,8 +385,8 @@ impl ProgramSubCommands for App<'_, '_> { .takes_value(true) .required(false) .help( - "Maximum length of the upgradeable program \ - [default: the length of the original deployed program]", + "Maximum length of the upgradeable program [default: the \ + length of the original deployed program]", ), ) .arg( @@ -390,16 +397,15 @@ impl ProgramSubCommands for App<'_, '_> { .default_value("5") .help( "Maximum number of attempts to sign or resign transactions \ - after blockhash expiration. \ - If any transactions sent during the program deploy are still \ - unconfirmed after the initially chosen recent blockhash \ - expires, those transactions will be resigned with a new \ - recent blockhash and resent. Use this setting to adjust \ - the maximum number of transaction signing iterations. Each \ - blockhash is valid for about 60 seconds, which means using \ - the default value of 5 will lead to sending transactions \ - for at least 5 minutes or until all transactions are confirmed,\ - whichever comes first.", + after blockhash expiration. 
If any transactions sent during \ + the program deploy are still unconfirmed after the initially \ + chosen recent blockhash expires, those transactions will be \ + resigned with a new recent blockhash and resent. Use this \ + setting to adjust the maximum number of transaction signing \ + iterations. Each blockhash is valid for about 60 seconds, \ + which means using the default value of 5 will lead to \ + sending transactions for at least 5 minutes or until all \ + transactions are confirmed,whichever comes first.", ), ) .arg(Arg::with_name("use_rpc").long("use-rpc").help( @@ -410,9 +416,12 @@ impl ProgramSubCommands for App<'_, '_> { Arg::with_name("skip_feature_verify") .long("skip-feature-verify") .takes_value(false) - .help("Don't verify program against the activated feature set. \ - This setting means a program containing a syscall not yet active on \ - mainnet will succeed local verification, but fail during the last step of deployment.") + .help( + "Don't verify program against the activated feature set. This \ + setting means a program containing a syscall not yet active \ + on mainnet will succeed local verification, but fail during \ + the last step of deployment.", + ), ), ) .subcommand( @@ -598,8 +607,8 @@ impl ProgramSubCommands for App<'_, '_> { Arg::with_name("recipient_account") .long("recipient") .value_name("RECIPIENT_ADDRESS"), - "Recipient of closed account's lamports \ - [default: the default configured keypair]." + "Recipient of closed account's lamports [default: the default \ + configured keypair]." 
)) .arg( Arg::with_name("lamports") @@ -643,9 +652,7 @@ impl ProgramSubCommands for App<'_, '_> { ) .subcommand( SubCommand::with_name("migrate") - .about( - "Migrates an upgradeable program to loader-v4", - ) + .about("Migrates an upgradeable program to loader-v4") .arg( Arg::with_name("program_id") .index(1) @@ -662,8 +669,7 @@ impl ProgramSubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .help( - "Upgrade authority [default: the default configured \ - keypair]", + "Upgrade authority [default: the default configured keypair]", ), ) .arg(compute_unit_price_arg()), @@ -1533,10 +1539,7 @@ fn fetch_verified_buffer_program_data( }; verify_elf(&buffer_program_data, feature_set).map_err(|err| { - format!( - "Buffer account {buffer_pubkey} has invalid program data: {:?}", - err - ) + format!("Buffer account {buffer_pubkey} has invalid program data: {err:?}") })?; Ok(buffer_program_data) @@ -1569,8 +1572,8 @@ fn fetch_buffer_program_data( } if authority_address != Some(buffer_authority) { return Err(format!( - "Buffer's authority {:?} does not match authority provided {}", - authority_address, buffer_authority + "Buffer's authority {authority_address:?} does not match authority provided \ + {buffer_authority}" ) .into()); } @@ -3020,8 +3023,8 @@ fn extend_program_data_if_needed( let max_program_len = max_permitted_data_length .saturating_sub(UpgradeableLoaderState::size_of_programdata(0)); return Err(format!( - "New program ({program_id}) data account is too big: {required_len}.\n\ - Maximum program size: {max_program_len}.", + "New program ({program_id}) data account is too big: {required_len}.\nMaximum program \ + size: {max_program_len}.", ) .into()); } @@ -3202,7 +3205,12 @@ fn send_deploy_messages( } let connection_cache = if config.use_quic { - ConnectionCache::new_quic("connection_cache_cli_program_quic", 1) + #[cfg(feature = "dev-context-only-utils")] + let cache = + 
ConnectionCache::new_quic_for_tests("connection_cache_cli_program_quic", 1); + #[cfg(not(feature = "dev-context-only-utils"))] + let cache = ConnectionCache::new_quic("connection_cache_cli_program_quic", 1); + cache } else { ConnectionCache::with_udp("connection_cache_cli_program_udp", 1) }; @@ -3250,7 +3258,7 @@ fn send_deploy_messages( if !transaction_errors.is_empty() { for transaction_error in &transaction_errors { - error!("{:?}", transaction_error); + error!("{transaction_error:?}"); } return Err( format!("{} write transactions failed", transaction_errors.len()).into(), @@ -3717,8 +3725,8 @@ mod tests { "test", "program", "upgrade", - format!("{}", buffer_key).as_str(), - format!("{}", program_key).as_str(), + format!("{buffer_key}").as_str(), + format!("{program_key}").as_str(), "--skip-feature-verify", ]); assert_eq!( diff --git a/cli/src/program_v4.rs b/cli/src/program_v4.rs index 354046682904a8..2abaecc1e5fb15 100644 --- a/cli/src/program_v4.rs +++ b/cli/src/program_v4.rs @@ -169,9 +169,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("PROGRAM_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help( - "Program account signer for deploying a new program", - ), + .help("Program account signer for deploying a new program"), ) .arg( Arg::with_name("program-id") @@ -186,9 +184,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .value_name("BUFFER_SIGNER") .takes_value(true) .validator(is_valid_signer) - .help( - "Optional intermediate buffer account to write data to", - ), + .help("Optional intermediate buffer account to write data to"), ) .arg( Arg::with_name("authority") @@ -253,7 +249,8 @@ impl ProgramV4SubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .help( - "Current program authority [default: the default configured keypair]", + "Current program authority [default: the default configured \ + keypair]", ), ) .arg( @@ -263,9 +260,7 @@ impl ProgramV4SubCommands for App<'_, '_> { .takes_value(true) 
.required(true) .validator(is_valid_signer) - .help( - "New program authority", - ), + .help("New program authority"), ) .offline_args() .arg(compute_unit_price_arg()), @@ -298,7 +293,9 @@ impl ProgramV4SubCommands for App<'_, '_> { .takes_value(true) .validator(is_valid_signer) .help( - "Reserves the address and links it as the programs next-version, which is a hint that frontends can show to users", + "Reserves the address and links it as the programs \ + next-version, which is a hint that frontends can show to \ + users", ), ) .offline_args() @@ -672,12 +669,20 @@ pub fn process_deploy_program( { // Deploy new program if program_account_exists { - return Err("Program account does exist already. Did you perhaps intent to redeploy an existing program instead? Then use --program-id instead of --program-keypair.".into()); + return Err( + "Program account does exist already. Did you perhaps intent to redeploy an \ + existing program instead? Then use --program-id instead of --program-keypair." + .into(), + ); } } else { // Redeploy an existing program if !program_account_exists { - return Err("Program account does not exist. Did you perhaps intent to deploy a new program instead? Then use --program-keypair instead of --program-id.".into()); + return Err( + "Program account does not exist. Did you perhaps intent to deploy a new program \ + instead? Then use --program-keypair instead of --program-id." 
+ .into(), + ); } } if let Some(program_account) = program_account.as_ref() { @@ -815,14 +820,13 @@ pub fn process_deploy_program( if upload_signer_index.is_none() { if upload_account.is_none() { return Err(format!( - "No ELF was provided or uploaded to the account {:?}", - upload_address, + "No ELF was provided or uploaded to the account {upload_address:?}", ) .into()); } } else { if upload_range.is_empty() { - return Err(format!("Attempting to upload empty range {:?}", upload_range).into()); + return Err(format!("Attempting to upload empty range {upload_range:?}").into()); } let first_write_message = Message::new( &[instruction::write( @@ -1208,7 +1212,11 @@ fn send_messages( if !write_messages.is_empty() { let connection_cache = if config.use_quic { - ConnectionCache::new_quic("connection_cache_cli_program_v4_quic", 1) + #[cfg(feature = "dev-context-only-utils")] + let cache = ConnectionCache::new_quic_for_tests("connection_cache_cli_program_quic", 1); + #[cfg(not(feature = "dev-context-only-utils"))] + let cache = ConnectionCache::new_quic("connection_cache_cli_program_quic", 1); + cache } else { ConnectionCache::with_udp("connection_cache_cli_program_v4_udp", 1) }; @@ -1258,7 +1266,7 @@ fn send_messages( if !transaction_errors.is_empty() { for transaction_error in &transaction_errors { - error!("{:?}", transaction_error); + error!("{transaction_error:?}"); } return Err(format!("{} write transactions failed", transaction_errors.len()).into()); } diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index b06739376c2f73..a6942808e18b92 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -9,10 +9,10 @@ use { solana_clap_utils::{ compute_budget::ComputeUnitLimit, input_parsers::lamports_of_sol, offline::SIGN_ONLY_ARG, }, + solana_cli_output::display::build_balance_message, solana_commitment_config::CommitmentConfig, solana_hash::Hash, solana_message::Message, - solana_native_token::lamports_to_sol, solana_pubkey::Pubkey, 
solana_rpc_client::rpc_client::RpcClient, }; @@ -162,22 +162,22 @@ where if from_pubkey == fee_pubkey { if from_balance == 0 || from_balance < spend.saturating_add(fee) { return Err(CliError::InsufficientFundsForSpendAndFee( - lamports_to_sol(spend), - lamports_to_sol(fee), + build_balance_message(spend, false, false), + build_balance_message(fee, false, false), *from_pubkey, )); } } else { if from_balance < spend { return Err(CliError::InsufficientFundsForSpend( - lamports_to_sol(spend), + build_balance_message(spend, false, false), *from_pubkey, )); } if !check_account_for_balance_with_commitment(rpc_client, fee_pubkey, fee, commitment)? { return Err(CliError::InsufficientFundsForFee( - lamports_to_sol(fee), + build_balance_message(fee, false, false), *fee_pubkey, )); } diff --git a/cli/src/stake.rs b/cli/src/stake.rs index a9667033ca7b7b..08681aecca9297 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -54,11 +54,11 @@ use { self as stake, error::StakeError, instruction::{self as stake_instruction, LockupArgs}, + stake_history::StakeHistory, state::{Authorized, Lockup, Meta, StakeActivationStatus, StakeAuthorize, StakeStateV2}, tools::{acceptable_reference_epoch_credits, eligible_for_deactivate_delinquent}, }, solana_system_interface::{error::SystemError, instruction as system_instruction}, - solana_sysvar::stake_history::StakeHistory, solana_transaction::Transaction, std::{ops::Deref, rc::Rc}, }; @@ -515,8 +515,8 @@ impl StakeSubCommands for App<'_, '_> { .takes_value(true) .validator(is_amount) .help( - "The rent-exempt amount to move into the new \ - stake account, in SOL. Required for offline signing.", + "The rent-exempt amount to move into the new stake account, in SOL. 
\ + Required for offline signing.", ), ), ) @@ -755,8 +755,8 @@ impl StakeSubCommands for App<'_, '_> { .default_value_if("with_rewards", None, "1") .requires("with_rewards") .help( - "Display rewards for NUM recent epochs, max 10 \ - [default: latest epoch only]", + "Display rewards for NUM recent epochs, max 10 [default: latest epoch \ + only]", ), ), ) @@ -1709,8 +1709,12 @@ pub fn process_deactivate_stake_account( *stake_account_pubkey }; + // DeactivateDelinquent parses a VoteState, which may change between simulation and execution let compute_unit_limit = match blockhash_query { BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, + BlockhashQuery::All(_) if deactivate_delinquent => { + ComputeUnitLimit::SimulatedWithExtraPercentage(5) + } BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, }; let ixs = vec![if deactivate_delinquent { @@ -2005,8 +2009,8 @@ pub fn process_split_stake( let lamports = Sol(lamports); let stake_minimum_delegation = Sol(stake_minimum_delegation); return Err(CliError::BadParameter(format!( - "need at least {stake_minimum_delegation} for minimum stake delegation, \ - provided: {lamports}" + "need at least {stake_minimum_delegation} for minimum stake delegation, provided: \ + {lamports}" )) .into()); } @@ -2019,7 +2023,8 @@ pub fn process_split_stake( owner if owner == system_program::id() => { if !account.data.is_empty() { Err(CliError::BadParameter(format!( - "Account {split_stake_account_address} has data and cannot be used to split stake" + "Account {split_stake_account_address} has data and cannot be used to \ + split stake" ))) } else { // if `stake_account`'s owner is the system_program and its data is @@ -2028,8 +2033,9 @@ pub fn process_split_stake( } } _ => Err(CliError::BadParameter(format!( - "Account {split_stake_account_address} already exists and cannot be used to split stake" - ))) + "Account {split_stake_account_address} already exists and cannot be used to \ + split stake" + 
))), } }; let current_balance = @@ -2805,9 +2811,10 @@ pub fn process_delegate_stake( let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; + // DelegateStake parses a VoteState, which may change between simulation and execution let compute_unit_limit = match blockhash_query { BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, - BlockhashQuery::All(_) => ComputeUnitLimit::Simulated, + BlockhashQuery::All(_) => ComputeUnitLimit::SimulatedWithExtraPercentage(5), }; let ixs = vec![stake_instruction::delegate_stake( stake_account_pubkey, diff --git a/cli/src/validator_info.rs b/cli/src/validator_info.rs index 9920e0b2431b69..605d4277a0bf9d 100644 --- a/cli/src/validator_info.rs +++ b/cli/src/validator_info.rs @@ -20,8 +20,10 @@ use { keypair::DefaultSigner, }, solana_cli_output::{CliValidatorInfo, CliValidatorInfoVec}, - solana_config_interface::instruction::{self as config_instruction}, - solana_config_program_client::{get_config_data, ConfigKeys}, + solana_config_interface::{ + instruction::{self as config_instruction}, + state::{get_config_data, ConfigKeys}, + }, solana_keypair::Keypair, solana_message::Message, solana_pubkey::Pubkey, @@ -130,7 +132,7 @@ fn parse_validator_info( pubkey: &Pubkey, account: &Account, ) -> Result<(Pubkey, Map), Box> { - if account.owner != solana_config_program_client::ID { + if account.owner != solana_config_interface::id() { return Err(format!("{pubkey} is not a validator info account").into()); } let key_list: ConfigKeys = deserialize(&account.data)?; @@ -303,7 +305,7 @@ pub fn process_set_validator_info( } // Check for existing validator-info account - let all_config = rpc_client.get_program_accounts(&solana_config_program_client::ID)?; + let all_config = rpc_client.get_program_accounts(&solana_config_interface::id())?; let existing_account = all_config .iter() .filter( @@ -432,7 +434,7 @@ pub fn process_get_validator_info( 
rpc_client.get_account(&validator_info_pubkey)?, )] } else { - let all_config = rpc_client.get_program_accounts(&solana_config_program_client::ID)?; + let all_config = rpc_client.get_program_accounts(&solana_config_interface::id())?; all_config .into_iter() .filter(|(_, validator_info_account)| { @@ -586,7 +588,7 @@ mod tests { parse_validator_info( &Pubkey::default(), &Account { - owner: solana_config_program_client::ID, + owner: solana_config_interface::id(), data, ..Account::default() } @@ -621,7 +623,7 @@ mod tests { assert!(parse_validator_info( &Pubkey::default(), &Account { - owner: solana_config_program_client::ID, + owner: solana_config_interface::id(), data, ..Account::default() }, diff --git a/cli/src/vote.rs b/cli/src/vote.rs index e3879581521f5b..a248a5aecc14c4 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -26,12 +26,11 @@ use { offline::*, }, solana_cli_output::{ - return_signers_with_config, CliEpochVotingHistory, CliLandedVote, CliVoteAccount, - ReturnSignersConfig, + display::build_balance_message, return_signers_with_config, CliEpochVotingHistory, + CliLandedVote, CliVoteAccount, ReturnSignersConfig, }, solana_commitment_config::CommitmentConfig, solana_message::Message, - solana_native_token::lamports_to_sol, solana_pubkey::Pubkey, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_rpc_client::rpc_client::RpcClient, @@ -42,9 +41,7 @@ use { solana_vote_program::{ vote_error::VoteError, vote_instruction::{self, withdraw, CreateVoteAccountConfig}, - vote_state::{ - VoteAuthorize, VoteInit, VoteState, VoteStateVersions, VOTE_CREDITS_MAXIMUM_PER_SLOT, - }, + vote_state::{VoteAuthorize, VoteInit, VoteStateV3, VOTE_CREDITS_MAXIMUM_PER_SLOT}, }, std::rc::Rc, }; @@ -371,8 +368,8 @@ impl VoteSubCommands for App<'_, '_> { .default_value_if("with_rewards", None, "1") .requires("with_rewards") .help( - "Display rewards for NUM recent epochs, max 10 \ - [default: latest epoch only]", + "Display rewards for NUM recent epochs, max 10 
[default: latest epoch \ + only]", ), ), ) @@ -829,13 +826,13 @@ pub fn process_create_vote_account( )?; let required_balance = rpc_client - .get_minimum_balance_for_rent_exemption(VoteState::size_of())? + .get_minimum_balance_for_rent_exemption(VoteStateV3::size_of())? .max(1); let amount = SpendAmount::Some(required_balance); let fee_payer = config.signers[fee_payer]; let nonce_authority = config.signers[nonce_authority]; - let space = VoteStateVersions::vote_state_size_of(true) as u64; + let space = VoteStateV3::size_of() as u64; let compute_unit_limit = match blockhash_query { BlockhashQuery::None(_) | BlockhashQuery::FeeCalculator(_, _) => ComputeUnitLimit::Default, @@ -1261,7 +1258,7 @@ pub(crate) fn get_vote_account( rpc_client: &RpcClient, vote_account_pubkey: &Pubkey, commitment_config: CommitmentConfig, -) -> Result<(Account, VoteState), Box> { +) -> Result<(Account, VoteStateV3), Box> { let vote_account = rpc_client .get_account_with_commitment(vote_account_pubkey, commitment_config)? .value @@ -1275,7 +1272,7 @@ pub(crate) fn get_vote_account( )) .into()); } - let vote_state = VoteState::deserialize(&vote_account.data).map_err(|_| { + let vote_state = VoteStateV3::deserialize(&vote_account.data).map_err(|_| { CliError::RpcRequestError( "Account data could not be deserialized to vote state".to_string(), ) @@ -1429,14 +1426,14 @@ pub fn process_withdraw_from_vote_account( if !sign_only { let current_balance = rpc_client.get_balance(vote_account_pubkey)?; let minimum_balance = - rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?; + rpc_client.get_minimum_balance_for_rent_exemption(VoteStateV3::size_of())?; if let SpendAmount::Some(withdraw_amount) = withdraw_amount { let balance_remaining = current_balance.saturating_sub(withdraw_amount); if balance_remaining < minimum_balance && balance_remaining != 0 { return Err(CliError::BadParameter(format!( "Withdraw amount too large. 
The vote account balance must be at least {} SOL \ to remain rent exempt", - lamports_to_sol(minimum_balance) + build_balance_message(minimum_balance, false, false) )) .into()); } diff --git a/cli/src/wallet.rs b/cli/src/wallet.rs index 32e3658fb71972..5400258c3cea2b 100644 --- a/cli/src/wallet.rs +++ b/cli/src/wallet.rs @@ -47,6 +47,30 @@ use { std::{fmt::Write as FmtWrite, fs::File, io::Write, rc::Rc, str::FromStr}, }; +// Formatted specifically for the manually-indented heredoc string +#[rustfmt::skip] +const CONFIRM_AFTER_HELP_MESSAGE: &str = + "Note: This will show more detailed information for finalized \ + transactions with verbose mode (-v/--verbose).\ + \n\ + \nAccount modes:\ + \n |srwx|\ + \n s: signed\ + \n r: readable (always true)\ + \n w: writable\ + \n x: program account (inner instructions excluded)"; + +#[rustfmt::skip] +const SEEDS_ARG_HELP_MESSAGE: &str = + "The seeds. \n\ + Each one must match the pattern PREFIX:VALUE. \n\ + PREFIX can be one of [string, pubkey, hex, u8] \n\ + or matches the pattern [u,i][16,32,64,128][le,be] \ + (for example u64le) for number values \n\ + [u,i] - represents whether the number is unsigned or signed, \n\ + [16,32,64,128] - represents the bit length, and \n\ + [le,be] - represents the byte order - little endian or big endian"; + pub trait WalletSubCommands { fn wallet_subcommands(self) -> Self; } @@ -135,19 +159,7 @@ impl WalletSubCommands for App<'_, '_> { .required(true) .help("The transaction signature to confirm"), ) - .after_help( - // Formatted specifically for the manually-indented heredoc string - "Note: This will show more detailed information for finalized \ - transactions with verbose mode (-v/--verbose).\ - \n\ - \nAccount modes:\ - \n |srwx|\ - \n s: signed\ - \n r: readable (always true)\ - \n w: writable\ - \n x: program account (inner instructions excluded)\ - ", - ), + .after_help(CONFIRM_AFTER_HELP_MESSAGE), ) .subcommand( SubCommand::with_name("create-address-with-seed") @@ -171,8 +183,8 @@ 
impl WalletSubCommands for App<'_, '_> { .takes_value(true) .required(true) .help( - "The program_id that the address will ultimately be used for, \n\ - or one of NONCE, STAKE, and VOTE keywords", + "The program_id that the address will ultimately be used for, or one \ + of NONCE, STAKE, and VOTE keywords", ), ) .arg(pubkey!( @@ -193,8 +205,8 @@ impl WalletSubCommands for App<'_, '_> { .takes_value(true) .required(true) .help( - "The program_id that the address will ultimately be used for, \n\ - or one of NONCE, STAKE, and VOTE keywords", + "The program_id that the address will ultimately be used for, or one \ + of NONCE, STAKE, and VOTE keywords", ), ) .arg( @@ -203,16 +215,7 @@ impl WalletSubCommands for App<'_, '_> { .value_name("SEED") .takes_value(true) .validator(is_structured_seed) - .help( - "The seeds. \n\ - Each one must match the pattern PREFIX:VALUE. \n\ - PREFIX can be one of [string, pubkey, hex, u8] \n\ - or matches the pattern [u,i][16,32,64,128][le,be] \ - (for example u64le) for number values \n\ - [u,i] - represents whether the number is unsigned or signed, \n\ - [16,32,64,128] - represents the bit length, and \n\ - [le,be] - represents the byte order - little endian or big endian", - ), + .help(SEEDS_ARG_HELP_MESSAGE), ), ) .subcommand( diff --git a/cli/tests/address_lookup_table.rs b/cli/tests/address_lookup_table.rs index 0081472d9a929c..085990a595c3b5 100644 --- a/cli/tests/address_lookup_table.rs +++ b/cli/tests/address_lookup_table.rs @@ -7,7 +7,7 @@ use { cli::{process_command, CliCommand, CliConfig}, }, solana_cli_output::{CliAddressLookupTable, CliAddressLookupTableCreated, OutputFormat}, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, solana_keypair::Keypair, solana_native_token::LAMPORTS_PER_SOL, solana_pubkey::Pubkey, @@ -21,7 +21,7 @@ use { fn test_cli_create_extend_and_freeze_address_lookup_table() { let mint_keypair = Keypair::new(); let mint_pubkey = 
mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -135,7 +135,7 @@ fn test_cli_create_extend_and_freeze_address_lookup_table() { fn test_cli_create_and_deactivate_address_lookup_table() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); diff --git a/cli/tests/cluster_query.rs b/cli/tests/cluster_query.rs index b3c6f12f3a9ea3..cf5bf1f003d9e2 100644 --- a/cli/tests/cluster_query.rs +++ b/cli/tests/cluster_query.rs @@ -5,10 +5,10 @@ use { test_utils::check_ready, }, solana_commitment_config::CommitmentConfig, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, solana_fee_structure::FeeStructure, solana_keypair::Keypair, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_rpc_client::rpc_client::RpcClient, solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, @@ -24,7 +24,7 @@ fn test_ping(compute_unit_price: Option) { let fee = FeeStructure::default().get_max_fee(1, 0); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, fee, @@ -42,9 +42,8 @@ fn test_ping(compute_unit_price: Option) { config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&default_signer]; - request_and_confirm_airdrop(&rpc_client, &config, &signer_pubkey, 
sol_to_lamports(1.0)) - .unwrap(); - check_balance!(sol_to_lamports(1.0), &rpc_client, &signer_pubkey); + request_and_confirm_airdrop(&rpc_client, &config, &signer_pubkey, LAMPORTS_PER_SOL).unwrap(); + check_balance!(LAMPORTS_PER_SOL, &rpc_client, &signer_pubkey); check_ready(&rpc_client); let count = 5; diff --git a/cli/tests/nonce.rs b/cli/tests/nonce.rs index 0245a8e26af492..d5188df3d09713 100644 --- a/cli/tests/nonce.rs +++ b/cli/tests/nonce.rs @@ -8,10 +8,10 @@ use { }, solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, solana_commitment_config::CommitmentConfig, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, solana_hash::Hash, solana_keypair::{keypair_from_seed, Keypair}, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_pubkey::Pubkey, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_nonce_utils::blockhash_query::{self, BlockhashQuery}, @@ -29,7 +29,7 @@ use { fn test_nonce(seed: Option, use_nonce_authority: bool, compute_unit_price: Option) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -46,11 +46,11 @@ fn test_nonce(seed: Option, use_nonce_authority: bool, compute_unit_pric &rpc_client, &config_payer, &config_payer.signers[0].pubkey(), - sol_to_lamports(2000.0), + 2000 * LAMPORTS_PER_SOL, ) .unwrap(); check_balance!( - sol_to_lamports(2000.0), + 2000 * LAMPORTS_PER_SOL, &rpc_client, &config_payer.signers[0].pubkey(), ); @@ -85,17 +85,17 @@ fn test_nonce(seed: Option, use_nonce_authority: bool, compute_unit_pric seed, nonce_authority: optional_authority, memo: None, - amount: SpendAmount::Some(sol_to_lamports(1000.0)), + amount: SpendAmount::Some(1000 * 
LAMPORTS_PER_SOL), compute_unit_price, }; process_command(&config_payer).unwrap(); check_balance!( - sol_to_lamports(1000.0), + 1000 * LAMPORTS_PER_SOL, &rpc_client, &config_payer.signers[0].pubkey(), ); - check_balance!(sol_to_lamports(1000.0), &rpc_client, &nonce_account); + check_balance!(1000 * LAMPORTS_PER_SOL, &rpc_client, &nonce_account); // Get nonce config_payer.signers.pop(); @@ -144,17 +144,17 @@ fn test_nonce(seed: Option, use_nonce_authority: bool, compute_unit_pric nonce_authority: index, memo: None, destination_account_pubkey: payee_pubkey, - lamports: sol_to_lamports(100.0), + lamports: 100 * LAMPORTS_PER_SOL, compute_unit_price, }; process_command(&config_payer).unwrap(); check_balance!( - sol_to_lamports(1000.0), + 1000 * LAMPORTS_PER_SOL, &rpc_client, &config_payer.signers[0].pubkey(), ); - check_balance!(sol_to_lamports(900.0), &rpc_client, &nonce_account); - check_balance!(sol_to_lamports(100.0), &rpc_client, &payee_pubkey); + check_balance!(900 * LAMPORTS_PER_SOL, &rpc_client, &nonce_account); + check_balance!(100 * LAMPORTS_PER_SOL, &rpc_client, &payee_pubkey); // Show nonce account config_payer.command = CliCommand::ShowNonceAccount { @@ -199,29 +199,29 @@ fn test_nonce(seed: Option, use_nonce_authority: bool, compute_unit_pric nonce_authority: 1, memo: None, destination_account_pubkey: payee_pubkey, - lamports: sol_to_lamports(100.0), + lamports: 100 * LAMPORTS_PER_SOL, compute_unit_price, }; process_command(&config_payer).unwrap(); check_balance!( - sol_to_lamports(1000.0), + 1000 * LAMPORTS_PER_SOL, &rpc_client, &config_payer.signers[0].pubkey(), ); - check_balance!(sol_to_lamports(800.0), &rpc_client, &nonce_account); - check_balance!(sol_to_lamports(200.0), &rpc_client, &payee_pubkey); + check_balance!(800 * LAMPORTS_PER_SOL, &rpc_client, &nonce_account); + check_balance!(200 * LAMPORTS_PER_SOL, &rpc_client, &payee_pubkey); } #[test] fn test_create_account_with_seed() { - const ONE_SIG_FEE: f64 = 0.000005; + const ONE_SIG_FEE: u64 = 
5000; solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, - sol_to_lamports(ONE_SIG_FEE), + ONE_SIG_FEE, Some(faucet_addr), SocketAddrSpace::Unspecified, ); @@ -237,23 +237,23 @@ fn test_create_account_with_seed() { &rpc_client, &CliConfig::recent_for_tests(), &offline_nonce_authority_signer.pubkey(), - sol_to_lamports(42.0), + 42 * LAMPORTS_PER_SOL, ) .unwrap(); request_and_confirm_airdrop( &rpc_client, &CliConfig::recent_for_tests(), &online_nonce_creator_signer.pubkey(), - sol_to_lamports(4242.0), + 4242 * LAMPORTS_PER_SOL, ) .unwrap(); check_balance!( - sol_to_lamports(42.0), + 42 * LAMPORTS_PER_SOL, &rpc_client, &offline_nonce_authority_signer.pubkey(), ); check_balance!( - sol_to_lamports(4242.0), + 4242 * LAMPORTS_PER_SOL, &rpc_client, &online_nonce_creator_signer.pubkey(), ); @@ -277,18 +277,18 @@ fn test_create_account_with_seed() { seed: Some(seed), nonce_authority: Some(authority_pubkey), memo: None, - amount: SpendAmount::Some(sol_to_lamports(241.0)), + amount: SpendAmount::Some(241 * LAMPORTS_PER_SOL), compute_unit_price: None, }; process_command(&creator_config).unwrap(); - check_balance!(sol_to_lamports(241.0), &rpc_client, &nonce_address); + check_balance!(241 * LAMPORTS_PER_SOL, &rpc_client, &nonce_address); check_balance!( - sol_to_lamports(42.0), + 42 * LAMPORTS_PER_SOL, &rpc_client, &offline_nonce_authority_signer.pubkey(), ); check_balance!( - sol_to_lamports(4001.0 - ONE_SIG_FEE), + 4001 * LAMPORTS_PER_SOL - ONE_SIG_FEE, &rpc_client, &online_nonce_creator_signer.pubkey(), ); @@ -312,7 +312,7 @@ fn test_create_account_with_seed() { authority_config.command = CliCommand::ClusterVersion; process_command(&authority_config).unwrap_err(); authority_config.command = CliCommand::Transfer { - amount: 
SpendAmount::Some(sol_to_lamports(10.0)), + amount: SpendAmount::Some(10 * LAMPORTS_PER_SOL), to: to_address, from: 0, sign_only: true, @@ -339,7 +339,7 @@ fn test_create_account_with_seed() { submit_config.json_rpc_url = test_validator.rpc_url(); submit_config.signers = vec![&authority_presigner]; submit_config.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(10.0)), + amount: SpendAmount::Some(10 * LAMPORTS_PER_SOL), to: to_address, from: 0, sign_only: false, @@ -359,16 +359,16 @@ fn test_create_account_with_seed() { compute_unit_price: None, }; process_command(&submit_config).unwrap(); - check_balance!(sol_to_lamports(241.0), &rpc_client, &nonce_address); + check_balance!(241 * LAMPORTS_PER_SOL, &rpc_client, &nonce_address); check_balance!( - sol_to_lamports(32.0 - ONE_SIG_FEE), + 32 * LAMPORTS_PER_SOL - ONE_SIG_FEE, &rpc_client, &offline_nonce_authority_signer.pubkey(), ); check_balance!( - sol_to_lamports(4001.0 - ONE_SIG_FEE), + 4001 * LAMPORTS_PER_SOL - ONE_SIG_FEE, &rpc_client, &online_nonce_creator_signer.pubkey(), ); - check_balance!(sol_to_lamports(10.0), &rpc_client, &to_address); + check_balance!(10 * LAMPORTS_PER_SOL, &rpc_client, &to_address); } diff --git a/cli/tests/program.rs b/cli/tests/program.rs index ee7c848b8b5054..c29fdbfb0ec6d6 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -18,7 +18,7 @@ use { solana_client::rpc_config::RpcSendTransactionConfig, solana_commitment_config::CommitmentConfig, solana_compute_budget_interface::ComputeBudgetInstruction, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, solana_fee_calculator::FeeRateGovernor, solana_keypair::Keypair, solana_loader_v3_interface::state::UpgradeableLoaderState, @@ -56,7 +56,9 @@ fn test_validator_genesis(mint_keypair: Keypair) -> TestValidatorGenesis { exemption_threshold: 1.0, ..Rent::default() }) - .faucet_addr(Some(run_local_faucet(mint_keypair, None))); + 
.faucet_addr(Some(run_local_faucet_with_unique_port_for_tests( + mint_keypair, + ))); genesis } @@ -77,8 +79,7 @@ fn expect_command_failure(config: &CliConfig, should_fail_because: &str, error_e let error_actual = error_actual.to_string(); assert!( error_expected == error_actual, - "Command failed as expected, but with an unexpected error.\n\ - Expected: {error_expected}\n\ + "Command failed as expected, but with an unexpected error. Expected: {error_expected}, \ Actual: {error_actual}", ); } @@ -89,9 +90,8 @@ fn expect_account_absent(rpc_client: &RpcClient, pubkey: Pubkey, absent_because: let error_actual = error_actual.to_string(); assert!( format!("AccountNotFound: pubkey={pubkey}") == error_actual, - "Failed to retrieve an account details.\n\ - Expected account to be absent, but got a different error:\n\ - {error_actual}", + "Failed to retrieve an account details. Expected account to be absent, but got a \ + different error: {error_actual}", ); } @@ -476,10 +476,11 @@ fn test_cli_program_deploy_feature(enable_feature: bool, skip_preflight: bool) { assert!(res.is_ok()); } else { expect_command_failure( - &config, - "Program contains a syscall from a deactivated feature", - "ELF error: ELF error: Unresolved symbol (sol_alt_bn128_group_op) at instruction #49 (ELF file offset 0x188)" - ); + &config, + "Program contains a syscall from a deactivated feature", + "ELF error: ELF error: Unresolved symbol (sol_alt_bn128_group_op) at instruction #49 \ + (ELF file offset 0x188)", + ); // If we bypass the verification, there should be no error config.command = CliCommand::Program(ProgramCliCommand::Deploy { @@ -654,7 +655,12 @@ fn test_cli_program_upgrade_with_feature(enable_feature: bool) { expect_command_failure( &config, "Program contains a syscall to a disabled feature", - format!("Buffer account {} has invalid program data: \"ELF error: ELF error: Unresolved symbol (sol_alt_bn128_group_op) at instruction #49 (ELF file offset 0x188)\"", buffer_signer.pubkey()).as_str(), 
+ format!( + "Buffer account {} has invalid program data: \"ELF error: ELF error: Unresolved \ + symbol (sol_alt_bn128_group_op) at instruction #49 (ELF file offset 0x188)\"", + buffer_signer.pubkey() + ) + .as_str(), ); // If we skip verification, the failure should be at a later stage @@ -1176,20 +1182,25 @@ fn test_cli_program_upgrade_auto_extend(skip_preflight: bool) { expect_command_failure( &config, "Cannot upgrade a program when ELF does not fit into the allocated data account", - "Deploying program failed: Error processing Instruction 0: account data too small for instruction", + "Deploying program failed: Error processing Instruction 0: account data too small for \ + instruction", ); } else { + #[rustfmt::skip] + let expected_error = + "Deploying program failed: \ + RPC response error -32002: \ + Transaction simulation failed: \ + Error processing Instruction 0: \ + account data too small for instruction; 3 log messages:\n \ + Program BPFLoaderUpgradeab1e11111111111111111111111 invoke [1]\n \ + ProgramData account not large enough\n \ + Program BPFLoaderUpgradeab1e11111111111111111111111 failed: account data too small \ + for instruction\n"; expect_command_failure( &config, "Can not upgrade a program when ELF does not fit into the allocated data account", - "Deploying program failed: \ - RPC response error -32002: \ - Transaction simulation failed: \ - Error processing Instruction 0: \ - account data too small for instruction; 3 log messages:\n \ - Program BPFLoaderUpgradeab1e11111111111111111111111 invoke [1]\n \ - ProgramData account not large enough\n \ - Program BPFLoaderUpgradeab1e11111111111111111111111 failed: account data too small for instruction\n", + expected_error, ); } @@ -1479,9 +1490,8 @@ fn test_cli_program_extend_program() { skip_feature_verification: true, }); - expect_command_failure( - &config, - "Program upgrade must fail, as the buffer is 1 byte too short", + #[rustfmt::skip] + let expected_error = "Deploying program failed: \ RPC 
response error -32002: \ Transaction simulation failed: \ @@ -1489,7 +1499,12 @@ fn test_cli_program_extend_program() { account data too small for instruction; 3 log messages:\n \ Program BPFLoaderUpgradeab1e11111111111111111111111 invoke [1]\n \ ProgramData account not large enough\n \ - Program BPFLoaderUpgradeab1e11111111111111111111111 failed: account data too small for instruction\n", + Program BPFLoaderUpgradeab1e11111111111111111111111 failed: account data too small for \ + instruction\n"; + expect_command_failure( + &config, + "Program upgrade must fail, as the buffer is 1 byte too short", + expected_error, ); // Wait one slot to avoid "Program was deployed in this block already" error @@ -1989,8 +2004,8 @@ fn test_cli_program_write_buffer() { &config, "It should not be possible to deploy a program into an account that is too small", &format!( - "Buffer account data size ({}) is smaller than the minimum size ({})", - buffer_account_len, min_buffer_account_len + "Buffer account data size ({buffer_account_len}) is smaller than the minimum size \ + ({min_buffer_account_len})" ), ); } @@ -2065,7 +2080,8 @@ fn test_cli_program_write_buffer_feature(enable_feature: bool) { expect_command_failure( &config, "Program contains a syscall from a deactivated feature", - "ELF error: ELF error: Unresolved symbol (sol_alt_bn128_group_op) at instruction #49 (ELF file offset 0x188)" + "ELF error: ELF error: Unresolved symbol (sol_alt_bn128_group_op) at instruction #49 \ + (ELF file offset 0x188)", ); // If we bypass the verification, there should be no error @@ -2915,7 +2931,7 @@ fn test_cli_program_deploy_with_args(compute_unit_price: Option, use_rpc: b let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidatorGenesis::default() .fee_rate_governor(FeeRateGovernor::new(0, 0)) .rent(Rent { 
diff --git a/cli/tests/request_airdrop.rs b/cli/tests/request_airdrop.rs index ed93fdc6401b96..78e17955cfe48e 100644 --- a/cli/tests/request_airdrop.rs +++ b/cli/tests/request_airdrop.rs @@ -2,9 +2,9 @@ use { solana_cli::cli::{process_command, CliCommand, CliConfig}, solana_commitment_config::CommitmentConfig, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, solana_keypair::Keypair, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_rpc_client::rpc_client::RpcClient, solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, @@ -15,7 +15,7 @@ use { fn test_cli_request_airdrop() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -23,7 +23,7 @@ fn test_cli_request_airdrop() { bob_config.json_rpc_url = test_validator.rpc_url(); bob_config.command = CliCommand::Airdrop { pubkey: None, - lamports: sol_to_lamports(50.0), + lamports: 50 * LAMPORTS_PER_SOL, }; let keypair = Keypair::new(); bob_config.signers = vec![&keypair]; @@ -37,5 +37,5 @@ fn test_cli_request_airdrop() { let balance = rpc_client .get_balance(&bob_config.signers[0].pubkey()) .unwrap(); - assert_eq!(balance, sol_to_lamports(50.0)); + assert_eq!(balance, 50 * LAMPORTS_PER_SOL); } diff --git a/cli/tests/stake.rs b/cli/tests/stake.rs index 25fd1a2e733697..08d89bc5c9cb34 100644 --- a/cli/tests/stake.rs +++ b/cli/tests/stake.rs @@ -12,11 +12,11 @@ use { solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, solana_commitment_config::CommitmentConfig, solana_epoch_schedule::EpochSchedule, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, 
solana_fee_calculator::FeeRateGovernor, solana_fee_structure::FeeStructure, solana_keypair::{keypair_from_seed, Keypair}, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_nonce::state::State as NonceState, solana_pubkey::Pubkey, solana_rent::Rent, @@ -39,7 +39,7 @@ fn test_stake_delegation_force() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); let authorized_withdrawer = Keypair::new().pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let slots_per_epoch = 32; let test_validator = TestValidatorGenesis::default() .fee_rate_governor(FeeRateGovernor::new(0, 0)) @@ -204,7 +204,7 @@ fn test_seed_stake_delegation_and_deactivation(compute_unit_price: Option) let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -299,7 +299,7 @@ fn test_stake_delegation_and_withdraw_available() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -318,7 +318,7 @@ fn test_stake_delegation_and_withdraw_available() { &rpc_client, &config_validator, &config_validator.signers[0].pubkey(), - sol_to_lamports(100.0), + 100 * LAMPORTS_PER_SOL, ) .unwrap(); check_balance!( @@ -336,7 +336,7 @@ fn test_stake_delegation_and_withdraw_available() { withdrawer: None, withdrawer_signer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(sol_to_lamports(50.0)), + amount: SpendAmount::Some(50 * 
LAMPORTS_PER_SOL), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -395,10 +395,10 @@ fn test_stake_delegation_and_withdraw_available() { &rpc_client, &config_validator, &stake_keypair.pubkey(), - sol_to_lamports(5.0), + 5 * LAMPORTS_PER_SOL, ) .unwrap(); - check_balance!(sol_to_lamports(55.0), &rpc_client, &stake_keypair.pubkey()); + check_balance!(55 * LAMPORTS_PER_SOL, &rpc_client, &stake_keypair.pubkey()); // Withdraw available stake config_validator.signers = vec![&validator_keypair]; @@ -420,7 +420,7 @@ fn test_stake_delegation_and_withdraw_available() { }; process_command(&config_validator).unwrap(); // Extra (inactive) SOL is withdrawn - check_balance!(sol_to_lamports(5.0), &rpc_client, &recipient_pubkey); + check_balance!(5 * LAMPORTS_PER_SOL, &rpc_client, &recipient_pubkey); // Deactivate stake config_validator.command = CliCommand::DeactivateStake { @@ -459,7 +459,7 @@ fn test_stake_delegation_and_withdraw_available() { }; process_command(&config_validator).unwrap(); // Complete balance is withdrawn because all stake is inactive - check_balance!(sol_to_lamports(55.0), &rpc_client, &recipient_pubkey); + check_balance!(55 * LAMPORTS_PER_SOL, &rpc_client, &recipient_pubkey); } #[test] @@ -468,7 +468,7 @@ fn test_stake_delegation_and_withdraw_all() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -487,7 +487,7 @@ fn test_stake_delegation_and_withdraw_all() { &rpc_client, &config_validator, &config_validator.signers[0].pubkey(), - sol_to_lamports(100.0), + 100 * LAMPORTS_PER_SOL, ) .unwrap(); check_balance!( @@ -505,7 +505,7 @@ fn test_stake_delegation_and_withdraw_all() { withdrawer: None, 
withdrawer_signer: None, lockup: Lockup::default(), - amount: SpendAmount::Some(sol_to_lamports(50.0)), + amount: SpendAmount::Some(50 * LAMPORTS_PER_SOL), sign_only: false, dump_transaction_message: false, blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), @@ -561,10 +561,10 @@ fn test_stake_delegation_and_withdraw_all() { &rpc_client, &config_validator, &stake_keypair.pubkey(), - sol_to_lamports(5.0), + 5 * LAMPORTS_PER_SOL, ) .unwrap(); - check_balance!(sol_to_lamports(55.0), &rpc_client, &stake_keypair.pubkey()); + check_balance!(55 * LAMPORTS_PER_SOL, &rpc_client, &stake_keypair.pubkey()); // Withdraw all stake still fails, because it attempts to withdraw both // activating and inactive stake @@ -623,7 +623,7 @@ fn test_stake_delegation_and_withdraw_all() { compute_unit_price: None, }; process_command(&config_validator).unwrap(); - check_balance!(sol_to_lamports(55.0), &rpc_client, &recipient_pubkey); + check_balance!(55 * LAMPORTS_PER_SOL, &rpc_client, &recipient_pubkey); } #[test_case(None; "base")] @@ -633,7 +633,7 @@ fn test_stake_delegation_and_deactivation(compute_unit_price: Option) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -725,7 +725,7 @@ fn test_offline_stake_delegation_and_deactivation(compute_unit_price: Option let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -1014,7 +1014,7 @@ fn test_stake_authorize(compute_unit_price: Option) { let mint_keypair = Keypair::new(); let 
mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -1344,7 +1344,7 @@ fn test_stake_authorize_with_fee_payer() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, fee_one_sig, @@ -1524,7 +1524,7 @@ fn test_stake_split(compute_unit_price: Option) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, 1, @@ -1685,7 +1685,7 @@ fn test_stake_set_lockup(compute_unit_price: Option) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, 1, @@ -1973,7 +1973,7 @@ fn test_offline_nonced_create_stake_account_and_withdraw(compute_unit_price: Opt let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -2216,7 +2216,7 @@ fn test_stake_checked_instructions() { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr 
= run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); diff --git a/cli/tests/transfer.rs b/cli/tests/transfer.rs index 5badc4e015620a..694f8ace916dce 100644 --- a/cli/tests/transfer.rs +++ b/cli/tests/transfer.rs @@ -9,11 +9,11 @@ use { solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, solana_commitment_config::CommitmentConfig, solana_compute_budget_interface::ComputeBudgetInstruction, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, solana_fee_structure::FeeStructure, solana_keypair::{keypair_from_seed, Keypair}, solana_message::Message, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_nonce::state::State as NonceState, solana_pubkey::Pubkey, solana_rpc_client::rpc_client::RpcClient, @@ -34,7 +34,7 @@ fn test_transfer(skip_preflight: bool) { let fee_two_sig = FeeStructure::default().get_max_fee(2, 0); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, fee_one_sig, @@ -56,16 +56,16 @@ fn test_transfer(skip_preflight: bool) { let sender_pubkey = config.signers[0].pubkey(); let recipient_pubkey = Pubkey::from([1u8; 32]); - request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, sol_to_lamports(5.0)) + request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, 5 * LAMPORTS_PER_SOL) .unwrap(); - check_balance!(sol_to_lamports(5.0), &rpc_client, &sender_pubkey); + check_balance!(5 * LAMPORTS_PER_SOL, &rpc_client, &sender_pubkey); check_balance!(0, &rpc_client, &recipient_pubkey); check_ready(&rpc_client); // Plain ole transfer config.command = CliCommand::Transfer { - amount: 
SpendAmount::Some(sol_to_lamports(1.0)), + amount: SpendAmount::Some(LAMPORTS_PER_SOL), to: recipient_pubkey, from: 0, sign_only: false, @@ -83,15 +83,15 @@ fn test_transfer(skip_preflight: bool) { }; process_command(&config).unwrap(); check_balance!( - sol_to_lamports(4.0) - fee_one_sig, + 4 * LAMPORTS_PER_SOL - fee_one_sig, &rpc_client, &sender_pubkey ); - check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey); + check_balance!(LAMPORTS_PER_SOL, &rpc_client, &recipient_pubkey); // Plain ole transfer, failure due to InsufficientFundsForSpendAndFee config.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(4.0)), + amount: SpendAmount::Some(4 * LAMPORTS_PER_SOL), to: recipient_pubkey, from: 0, sign_only: false, @@ -109,11 +109,11 @@ fn test_transfer(skip_preflight: bool) { }; assert!(process_command(&config).is_err()); check_balance!( - sol_to_lamports(4.0) - fee_one_sig, + 4 * LAMPORTS_PER_SOL - fee_one_sig, &rpc_client, &sender_pubkey ); - check_balance!(sol_to_lamports(1.0), &rpc_client, &recipient_pubkey); + check_balance!(LAMPORTS_PER_SOL, &rpc_client, &recipient_pubkey); let mut offline = CliConfig::recent_for_tests(); offline.json_rpc_url = String::default(); @@ -123,14 +123,13 @@ fn test_transfer(skip_preflight: bool) { process_command(&offline).unwrap_err(); let offline_pubkey = offline.signers[0].pubkey(); - request_and_confirm_airdrop(&rpc_client, &offline, &offline_pubkey, sol_to_lamports(1.0)) - .unwrap(); - check_balance!(sol_to_lamports(1.0), &rpc_client, &offline_pubkey); + request_and_confirm_airdrop(&rpc_client, &offline, &offline_pubkey, LAMPORTS_PER_SOL).unwrap(); + check_balance!(LAMPORTS_PER_SOL, &rpc_client, &offline_pubkey); // Offline transfer let blockhash = rpc_client.get_latest_blockhash().unwrap(); offline.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(0.5)), + amount: SpendAmount::Some(LAMPORTS_PER_SOL / 2), to: recipient_pubkey, from: 0, sign_only: true, @@ -153,7 
+152,7 @@ fn test_transfer(skip_preflight: bool) { let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap(); config.signers = vec![&offline_presigner]; config.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(0.5)), + amount: SpendAmount::Some(LAMPORTS_PER_SOL / 2), to: recipient_pubkey, from: 0, sign_only: false, @@ -171,11 +170,11 @@ fn test_transfer(skip_preflight: bool) { }; process_command(&config).unwrap(); check_balance!( - sol_to_lamports(0.5) - fee_one_sig, + LAMPORTS_PER_SOL / 2 - fee_one_sig, &rpc_client, &offline_pubkey ); - check_balance!(sol_to_lamports(1.5), &rpc_client, &recipient_pubkey); + check_balance!(1_500_000_000, &rpc_client, &recipient_pubkey); // Create nonce account let nonce_account = keypair_from_seed(&[3u8; 32]).unwrap(); @@ -193,7 +192,7 @@ fn test_transfer(skip_preflight: bool) { }; process_command(&config).unwrap(); check_balance!( - sol_to_lamports(4.0) - fee_one_sig - fee_two_sig - minimum_nonce_balance, + 4 * LAMPORTS_PER_SOL - fee_one_sig - fee_two_sig - minimum_nonce_balance, &rpc_client, &sender_pubkey, ); @@ -211,7 +210,7 @@ fn test_transfer(skip_preflight: bool) { // Nonced transfer config.signers = vec![&default_signer]; config.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(1.0)), + amount: SpendAmount::Some(LAMPORTS_PER_SOL), to: recipient_pubkey, from: 0, sign_only: false, @@ -232,11 +231,11 @@ fn test_transfer(skip_preflight: bool) { }; process_command(&config).unwrap(); check_balance!( - sol_to_lamports(3.0) - 2 * fee_one_sig - fee_two_sig - minimum_nonce_balance, + 3 * LAMPORTS_PER_SOL - 2 * fee_one_sig - fee_two_sig - minimum_nonce_balance, &rpc_client, &sender_pubkey, ); - check_balance!(sol_to_lamports(2.5), &rpc_client, &recipient_pubkey); + check_balance!(2_500_000_000, &rpc_client, &recipient_pubkey); let new_nonce_hash = solana_rpc_client_nonce_utils::get_account_with_commitment( &rpc_client, &nonce_account.pubkey(), @@ -258,7 +257,7 @@ fn 
test_transfer(skip_preflight: bool) { }; process_command(&config).unwrap(); check_balance!( - sol_to_lamports(3.0) - 3 * fee_one_sig - fee_two_sig - minimum_nonce_balance, + 3 * LAMPORTS_PER_SOL - 3 * fee_one_sig - fee_two_sig - minimum_nonce_balance, &rpc_client, &sender_pubkey, ); @@ -276,7 +275,7 @@ fn test_transfer(skip_preflight: bool) { // Offline, nonced transfer offline.signers = vec![&default_offline_signer]; offline.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(0.4)), + amount: SpendAmount::Some(400_000_000), to: recipient_pubkey, from: 0, sign_only: true, @@ -298,7 +297,7 @@ fn test_transfer(skip_preflight: bool) { let offline_presigner = sign_only.presigner_of(&offline_pubkey).unwrap(); config.signers = vec![&offline_presigner]; config.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(0.4)), + amount: SpendAmount::Some(400_000_000), to: recipient_pubkey, from: 0, sign_only: false, @@ -319,11 +318,11 @@ fn test_transfer(skip_preflight: bool) { }; process_command(&config).unwrap(); check_balance!( - sol_to_lamports(0.1) - 2 * fee_one_sig, + LAMPORTS_PER_SOL / 10 - 2 * fee_one_sig, &rpc_client, &offline_pubkey ); - check_balance!(sol_to_lamports(2.9), &rpc_client, &recipient_pubkey); + check_balance!(2_900_000_000, &rpc_client, &recipient_pubkey); } #[test] @@ -333,7 +332,7 @@ fn test_transfer_multisession_signing() { let fee_two_sig = FeeStructure::default().get_max_fee(2, 0); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, fee_one_sig, @@ -353,23 +352,23 @@ fn test_transfer_multisession_signing() { &rpc_client, &CliConfig::recent_for_tests(), &offline_from_signer.pubkey(), - sol_to_lamports(43.0), + 43 * LAMPORTS_PER_SOL, ) .unwrap(); request_and_confirm_airdrop( 
&rpc_client, &CliConfig::recent_for_tests(), &offline_fee_payer_signer.pubkey(), - sol_to_lamports(1.0) + 2 * fee_two_sig, + LAMPORTS_PER_SOL + 2 * fee_two_sig, ) .unwrap(); check_balance!( - sol_to_lamports(43.0), + 43 * LAMPORTS_PER_SOL, &rpc_client, &offline_from_signer.pubkey(), ); check_balance!( - sol_to_lamports(1.0) + 2 * fee_two_sig, + LAMPORTS_PER_SOL + 2 * fee_two_sig, &rpc_client, &offline_fee_payer_signer.pubkey(), ); @@ -387,7 +386,7 @@ fn test_transfer_multisession_signing() { fee_payer_config.command = CliCommand::ClusterVersion; process_command(&fee_payer_config).unwrap_err(); fee_payer_config.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(42.0)), + amount: SpendAmount::Some(42 * LAMPORTS_PER_SOL), to: to_pubkey, from: 1, sign_only: true, @@ -419,7 +418,7 @@ fn test_transfer_multisession_signing() { from_config.command = CliCommand::ClusterVersion; process_command(&from_config).unwrap_err(); from_config.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(42.0)), + amount: SpendAmount::Some(42 * LAMPORTS_PER_SOL), to: to_pubkey, from: 1, sign_only: true, @@ -448,7 +447,7 @@ fn test_transfer_multisession_signing() { config.json_rpc_url = test_validator.rpc_url(); config.signers = vec![&fee_payer_presigner, &from_presigner]; config.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(42.0)), + amount: SpendAmount::Some(42 * LAMPORTS_PER_SOL), to: to_pubkey, from: 1, sign_only: false, @@ -466,17 +465,13 @@ fn test_transfer_multisession_signing() { }; process_command(&config).unwrap(); + check_balance!(LAMPORTS_PER_SOL, &rpc_client, &offline_from_signer.pubkey(),); check_balance!( - sol_to_lamports(1.0), - &rpc_client, - &offline_from_signer.pubkey(), - ); - check_balance!( - sol_to_lamports(1.0) + fee_two_sig, + LAMPORTS_PER_SOL + fee_two_sig, &rpc_client, &offline_fee_payer_signer.pubkey(), ); - check_balance!(sol_to_lamports(42.0), &rpc_client, &to_pubkey); + 
check_balance!(42 * LAMPORTS_PER_SOL, &rpc_client, &to_pubkey); } #[test_case(None; "default")] @@ -486,7 +481,7 @@ fn test_transfer_all(compute_unit_price: Option) { let lamports_per_signature = FeeStructure::default().get_max_fee(1, 0); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, lamports_per_signature, @@ -563,7 +558,7 @@ fn test_transfer_unfunded_recipient() { solana_logger::setup(); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, 1, @@ -619,7 +614,7 @@ fn test_transfer_with_seed() { let fee = FeeStructure::default().get_max_fee(1, 0); let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_custom_fees( mint_pubkey, fee, @@ -647,19 +642,18 @@ fn test_transfer_with_seed() { ) .unwrap(); - request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, sol_to_lamports(1.0)) - .unwrap(); - request_and_confirm_airdrop(&rpc_client, &config, &derived_address, sol_to_lamports(5.0)) + request_and_confirm_airdrop(&rpc_client, &config, &sender_pubkey, LAMPORTS_PER_SOL).unwrap(); + request_and_confirm_airdrop(&rpc_client, &config, &derived_address, 5 * LAMPORTS_PER_SOL) .unwrap(); - check_balance!(sol_to_lamports(1.0), &rpc_client, &sender_pubkey); - check_balance!(sol_to_lamports(5.0), &rpc_client, &derived_address); + check_balance!(LAMPORTS_PER_SOL, &rpc_client, &sender_pubkey); + check_balance!(5 * 
LAMPORTS_PER_SOL, &rpc_client, &derived_address); check_balance!(0, &rpc_client, &recipient_pubkey); check_ready(&rpc_client); // Transfer with seed config.command = CliCommand::Transfer { - amount: SpendAmount::Some(sol_to_lamports(5.0)), + amount: SpendAmount::Some(5 * LAMPORTS_PER_SOL), to: recipient_pubkey, from: 0, sign_only: false, @@ -676,7 +670,7 @@ fn test_transfer_with_seed() { compute_unit_price: None, }; process_command(&config).unwrap(); - check_balance!(sol_to_lamports(1.0) - fee, &rpc_client, &sender_pubkey); - check_balance!(sol_to_lamports(5.0), &rpc_client, &recipient_pubkey); + check_balance!(LAMPORTS_PER_SOL - fee, &rpc_client, &sender_pubkey); + check_balance!(5 * LAMPORTS_PER_SOL, &rpc_client, &recipient_pubkey); check_balance!(0, &rpc_client, &derived_address); } diff --git a/cli/tests/validator_info.rs b/cli/tests/validator_info.rs index a0e8d7cae09c5b..b693b9d09cef83 100644 --- a/cli/tests/validator_info.rs +++ b/cli/tests/validator_info.rs @@ -5,7 +5,7 @@ use { cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig}, }, solana_commitment_config::CommitmentConfig, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, solana_keypair::{keypair_from_seed, Keypair}, solana_rpc_client::rpc_client::RpcClient, solana_signer::Signer, @@ -21,7 +21,7 @@ fn test_publish(compute_unit_price: Option) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); diff --git a/cli/tests/vote.rs b/cli/tests/vote.rs index c490be1a5fb2c2..6f9b09bb1c25d1 100644 --- a/cli/tests/vote.rs +++ b/cli/tests/vote.rs @@ -1,6 +1,6 @@ #![allow(clippy::arithmetic_side_effects)] use { - solana_account::state_traits::StateMut, + 
solana_account::ReadableAccount, solana_cli::{ check_balance, cli::{process_command, request_and_confirm_airdrop, CliCommand, CliConfig}, @@ -8,14 +8,14 @@ use { }, solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, solana_commitment_config::CommitmentConfig, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, solana_keypair::Keypair, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_nonce_utils::blockhash_query::{self, BlockhashQuery}, solana_signer::{null_signer::NullSigner, Signer}, solana_streamer::socket::SocketAddrSpace, solana_test_validator::TestValidator, - solana_vote_program::vote_state::{VoteAuthorize, VoteStateV3, VoteStateVersions}, + solana_vote_program::vote_state::{VoteAuthorize, VoteStateV3}, test_case::test_case, }; @@ -24,7 +24,7 @@ use { fn test_vote_authorize_and_withdraw(compute_unit_price: Option) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -63,8 +63,8 @@ fn test_vote_authorize_and_withdraw(compute_unit_price: Option) { let vote_account = rpc_client .get_account(&vote_account_keypair.pubkey()) .unwrap(); - let vote_state: VoteStateVersions = vote_account.state().unwrap(); - let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer; + let vote_state = VoteStateV3::deserialize(vote_account.data()).unwrap(); + let authorized_withdrawer = vote_state.authorized_withdrawer; assert_eq!(authorized_withdrawer, config.signers[0].pubkey()); let expected_balance = rpc_client .get_minimum_balance_for_rent_exemption(VoteStateV3::size_of()) @@ -117,8 +117,8 @@ fn test_vote_authorize_and_withdraw(compute_unit_price: Option) { let vote_account = rpc_client 
.get_account(&vote_account_keypair.pubkey()) .unwrap(); - let vote_state: VoteStateVersions = vote_account.state().unwrap(); - let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer; + let vote_state = VoteStateV3::deserialize(vote_account.data()).unwrap(); + let authorized_withdrawer = vote_state.authorized_withdrawer; assert_eq!(authorized_withdrawer, first_withdraw_authority.pubkey()); // Authorize vote account withdrawal to another signer with checked instruction @@ -164,8 +164,8 @@ fn test_vote_authorize_and_withdraw(compute_unit_price: Option) { let vote_account = rpc_client .get_account(&vote_account_keypair.pubkey()) .unwrap(); - let vote_state: VoteStateVersions = vote_account.state().unwrap(); - let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer; + let vote_state = VoteStateV3::deserialize(vote_account.data()).unwrap(); + let authorized_withdrawer = vote_state.authorized_withdrawer; assert_eq!(authorized_withdrawer, withdraw_authority.pubkey()); // Withdraw from vote account @@ -229,7 +229,7 @@ fn test_vote_authorize_and_withdraw(compute_unit_price: Option) { fn test_offline_vote_authorize_and_withdraw(compute_unit_price: Option) { let mint_keypair = Keypair::new(); let mint_pubkey = mint_keypair.pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(mint_keypair); let test_validator = TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); @@ -291,8 +291,8 @@ fn test_offline_vote_authorize_and_withdraw(compute_unit_price: Option) { let vote_account = rpc_client .get_account(&vote_account_keypair.pubkey()) .unwrap(); - let vote_state: VoteStateVersions = vote_account.state().unwrap(); - let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer; + let vote_state = VoteStateV3::deserialize(vote_account.data()).unwrap(); + let authorized_withdrawer = 
vote_state.authorized_withdrawer; assert_eq!(authorized_withdrawer, offline_keypair.pubkey()); let expected_balance = rpc_client .get_minimum_balance_for_rent_exemption(VoteStateV3::size_of()) @@ -368,8 +368,8 @@ fn test_offline_vote_authorize_and_withdraw(compute_unit_price: Option) { let vote_account = rpc_client .get_account(&vote_account_keypair.pubkey()) .unwrap(); - let vote_state: VoteStateVersions = vote_account.state().unwrap(); - let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer; + let vote_state = VoteStateV3::deserialize(vote_account.data()).unwrap(); + let authorized_withdrawer = vote_state.authorized_withdrawer; assert_eq!(authorized_withdrawer, withdraw_authority.pubkey()); // Withdraw from vote account offline diff --git a/client-test/tests/client.rs b/client-test/tests/client.rs index 8d8e938abc8405..eac55d7f60e60a 100644 --- a/client-test/tests/client.rs +++ b/client-test/tests/client.rs @@ -5,7 +5,7 @@ use { solana_commitment_config::{CommitmentConfig, CommitmentLevel}, solana_keypair::Keypair, solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path_auto_delete}, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_pubkey::Pubkey, solana_pubsub_client::{nonblocking, pubsub_client::PubsubClient}, solana_rpc::{ @@ -80,7 +80,7 @@ fn test_rpc_client() { let blockhash = client.get_latest_blockhash().unwrap(); - let tx = system_transaction::transfer(&alice, &bob_pubkey, sol_to_lamports(20.0), blockhash); + let tx = system_transaction::transfer(&alice, &bob_pubkey, 20 * LAMPORTS_PER_SOL, blockhash); let signature = client.send_transaction(&tx).unwrap(); let mut confirmed_tx = false; @@ -106,14 +106,14 @@ fn test_rpc_client() { .get_balance_with_commitment(&bob_pubkey, CommitmentConfig::processed()) .unwrap() .value, - sol_to_lamports(20.0) + 20 * LAMPORTS_PER_SOL ); assert_eq!( client .get_balance_with_commitment(&alice.pubkey(), CommitmentConfig::processed()) .unwrap() .value, - 
original_alice_balance - sol_to_lamports(20.0) + original_alice_balance - 20 * LAMPORTS_PER_SOL ); } diff --git a/client-test/tests/send_and_confirm_transactions_in_parallel.rs b/client-test/tests/send_and_confirm_transactions_in_parallel.rs index 570007acc47809..839041ff119a15 100644 --- a/client-test/tests/send_and_confirm_transactions_in_parallel.rs +++ b/client-test/tests/send_and_confirm_transactions_in_parallel.rs @@ -9,7 +9,7 @@ use { solana_commitment_config::CommitmentConfig, solana_keypair::Keypair, solana_message::Message, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_pubkey::Pubkey, solana_rpc_client::rpc_client::RpcClient, solana_signer::Signer, @@ -21,15 +21,15 @@ use { const NUM_TRANSACTIONS: usize = 1000; -fn create_messages(from: Pubkey, to: Pubkey) -> (Vec, f64) { +fn create_messages(from: Pubkey, to: Pubkey) -> (Vec, u64) { let mut messages = vec![]; - let mut sum = 0.0; + let mut sum = 0u64; for i in 1..NUM_TRANSACTIONS { - let amount_to_transfer = i as f64; - let ix = system_instruction::transfer(&from, &to, sol_to_lamports(amount_to_transfer)); + let amount_to_transfer = (i as u64).checked_mul(LAMPORTS_PER_SOL).unwrap(); + let ix = system_instruction::transfer(&from, &to, amount_to_transfer); let message = Message::new(&[ix], Some(&from)); messages.push(message); - sum += amount_to_transfer; + sum = sum.checked_add(amount_to_transfer).unwrap(); } (messages, sum) } @@ -80,14 +80,14 @@ fn test_send_and_confirm_transactions_in_parallel_without_tpu_client() { .get_balance_with_commitment(&bob_pubkey, CommitmentConfig::processed()) .unwrap() .value, - sol_to_lamports(sum) + sum ); assert_eq!( rpc_client .get_balance_with_commitment(&alice_pubkey, CommitmentConfig::processed()) .unwrap() .value, - original_alice_balance - sol_to_lamports(sum) + original_alice_balance - sum ); } @@ -145,13 +145,13 @@ fn test_send_and_confirm_transactions_in_parallel_with_tpu_client() { 
.get_balance_with_commitment(&bob_pubkey, CommitmentConfig::processed()) .unwrap() .value, - sol_to_lamports(sum) + sum ); assert_eq!( rpc_client .get_balance_with_commitment(&alice_pubkey, CommitmentConfig::processed()) .unwrap() .value, - original_alice_balance - sol_to_lamports(sum) + original_alice_balance - sum ); } diff --git a/client/Cargo.toml b/client/Cargo.toml index f2b6f01db49d0c..6000cd95b3388a 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -12,6 +12,9 @@ edition = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[features] +dev-context-only-utils = [] + [dependencies] async-trait = { workspace = true } bincode = { workspace = true } @@ -33,6 +36,7 @@ solana-instruction = { workspace = true } solana-keypair = { workspace = true } solana-measure = { workspace = true } solana-message = { workspace = true } +solana-net-utils = { workspace = true } solana-pubkey = { workspace = true } solana-pubsub-client = { workspace = true } solana-quic-client = { workspace = true } @@ -51,7 +55,7 @@ solana-transaction-status-client-types = { workspace = true } solana-udp-client = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true } [dev-dependencies] crossbeam-channel = { workspace = true } -solana-net-utils = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/client/src/client_option.rs b/client/src/client_option.rs new file mode 100644 index 00000000000000..4623a047130267 --- /dev/null +++ b/client/src/client_option.rs @@ -0,0 +1,18 @@ +use { + crate::connection_cache::ConnectionCache, + solana_keypair::Keypair, + std::{net::UdpSocket, sync::Arc}, + tokio::runtime::Handle as RuntimeHandle, + tokio_util::sync::CancellationToken, +}; + +/// [`ClientOption`] enum represents the available client types for TPU +/// communication: +/// * [`ConnectionCacheClient`]: Uses a shared [`ConnectionCache`] to manage +/// 
connections efficiently. +/// * [`TpuClientNextClient`]: Relies on the `tpu-client-next` crate and +/// requires a reference to a [`Keypair`]. +pub enum ClientOption<'a> { + ConnectionCache(Arc), + TpuClientNext(&'a Keypair, UdpSocket, RuntimeHandle, CancellationToken), +} diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index 28ae2157266de6..68927c98c0059d 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -74,6 +74,17 @@ impl ConnectionCache { Self::new_with_client_options(name, connection_pool_size, None, None, None) } + #[cfg(feature = "dev-context-only-utils")] + pub fn new_quic_for_tests(name: &'static str, connection_pool_size: usize) -> Self { + Self::new_with_client_options( + name, + connection_pool_size, + Some(solana_net_utils::sockets::bind_to_localhost_unique().unwrap()), + None, + None, + ) + } + /// Create a quic connection_cache with more client options pub fn new_with_client_options( name: &'static str, @@ -168,7 +179,7 @@ pub(crate) use dispatch; impl ClientConnection for BlockingClientConnection { dispatch!(fn server_addr(&self) -> &SocketAddr); dispatch!(fn send_data(&self, buffer: &[u8]) -> TransportResult<()>); - dispatch!(fn send_data_async(&self, buffer: Vec) -> TransportResult<()>); + dispatch!(fn send_data_async(&self, buffer: Arc>) -> TransportResult<()>); dispatch!(fn send_data_batch(&self, buffers: &[Vec]) -> TransportResult<()>); dispatch!(fn send_data_batch_async(&self, buffers: Vec>) -> TransportResult<()>); } diff --git a/client/src/lib.rs b/client/src/lib.rs index 59d63c22a25009..1dd13906080768 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::arithmetic_side_effects)] +pub mod client_option; pub mod connection_cache; pub mod nonblocking; pub mod send_and_confirm_transactions_in_parallel; diff --git a/client/src/send_and_confirm_transactions_in_parallel.rs b/client/src/send_and_confirm_transactions_in_parallel.rs index 
f6ea54655961ed..634ab923c01043 100644 --- a/client/src/send_and_confirm_transactions_in_parallel.rs +++ b/client/src/send_and_confirm_transactions_in_parallel.rs @@ -54,17 +54,6 @@ struct BlockHashData { pub last_valid_block_height: u64, } -// Deprecated struct to maintain backward compatibility -#[deprecated( - since = "2.2.0", - note = "Use SendAndConfirmConfigV2 with send_and_confirm_transactions_in_parallel_v2" -)] -#[derive(Clone, Debug, Copy)] -pub struct SendAndConfirmConfig { - pub with_spinner: bool, - pub resign_txs_count: Option, -} - // New struct with RpcSendTransactionConfig for non-breaking change #[derive(Clone, Debug, Copy)] pub struct SendAndConfirmConfigV2 { @@ -73,55 +62,6 @@ pub struct SendAndConfirmConfigV2 { pub rpc_send_transaction_config: RpcSendTransactionConfig, } -#[allow(deprecated)] -#[deprecated( - since = "2.2.0", - note = "Use send_and_confirm_transactions_in_parallel_v2" -)] -pub async fn send_and_confirm_transactions_in_parallel( - rpc_client: Arc, - tpu_client: Option, - messages: &[Message], - signers: &T, - config: SendAndConfirmConfig, -) -> Result>> { - let config_v2 = SendAndConfirmConfigV2 { - with_spinner: config.with_spinner, - resign_txs_count: config.resign_txs_count, - rpc_send_transaction_config: RpcSendTransactionConfig { - ..RpcSendTransactionConfig::default() - }, - }; - send_and_confirm_transactions_in_parallel_v2( - rpc_client, tpu_client, messages, signers, config_v2, - ) - .await -} - -#[allow(deprecated)] -#[deprecated( - since = "2.2.0", - note = "Use send_and_confirm_transactions_in_parallel_blocking_v2" -)] -pub fn send_and_confirm_transactions_in_parallel_blocking( - rpc_client: Arc, - tpu_client: Option, - messages: &[Message], - signers: &T, - config: SendAndConfirmConfig, -) -> Result>> { - let config_v2 = SendAndConfirmConfigV2 { - with_spinner: config.with_spinner, - resign_txs_count: config.resign_txs_count, - rpc_send_transaction_config: RpcSendTransactionConfig { - 
..RpcSendTransactionConfig::default() - }, - }; - send_and_confirm_transactions_in_parallel_blocking_v2( - rpc_client, tpu_client, messages, signers, config_v2, - ) -} - /// Sends and confirms transactions concurrently in a sync context pub fn send_and_confirm_transactions_in_parallel_blocking_v2( rpc_client: Arc, diff --git a/compute-budget-instruction/src/builtin_programs_filter.rs b/compute-budget-instruction/src/builtin_programs_filter.rs index 409e5530f024d7..ed2024719c8771 100644 --- a/compute-budget-instruction/src/builtin_programs_filter.rs +++ b/compute-budget-instruction/src/builtin_programs_filter.rs @@ -64,8 +64,10 @@ impl BuiltinProgramsFilter { #[cfg(test)] mod test { use { - super::*, agave_feature_set as feature_set, - solana_builtins_default_costs::get_migration_feature_position, + super::*, + solana_builtins_default_costs::{ + get_migration_feature_id, BuiltinCost, MigratingBuiltinCost, MIGRATING_BUILTINS_COSTS, + }, }; const DUMMY_PROGRAM_ID: &str = "dummmy1111111111111111111111111111111111111"; @@ -110,15 +112,29 @@ mod test { ); // migrating builtins - index += 1; - assert_eq!( - test_store.get_program_kind(index, &solana_sdk_ids::stake::id()), - ProgramKind::MigratingBuiltin { - core_bpf_migration_feature_index: get_migration_feature_position( - &feature_set::migrate_stake_program_to_core_bpf::id() - ), - } - ); + for (program_id, migrating_builtin) in MIGRATING_BUILTINS_COSTS { + index += 1; + + let BuiltinCost::Migrating(MigratingBuiltinCost { + core_bpf_migration_feature, + position, + }) = migrating_builtin + else { + panic!("MIGRATING_BUILTINS_COSTS must only contain BuiltinCost::Migrating"); + }; + + assert_eq!( + get_migration_feature_id(*position), + core_bpf_migration_feature + ); + + assert_eq!( + test_store.get_program_kind(index, program_id), + ProgramKind::MigratingBuiltin { + core_bpf_migration_feature_index: *position, + } + ); + } } #[test] diff --git a/compute-budget-instruction/src/compute_budget_instruction_details.rs 
b/compute-budget-instruction/src/compute_budget_instruction_details.rs index c25847bd238f8a..7a2012e046c213 100644 --- a/compute-budget-instruction/src/compute_budget_instruction_details.rs +++ b/compute-budget-instruction/src/compute_budget_instruction_details.rs @@ -223,7 +223,9 @@ impl ComputeBudgetInstructionDetails { mod test { use { super::*, - solana_builtins_default_costs::get_migration_feature_position, + solana_builtins_default_costs::{ + get_migration_feature_position, BuiltinCost, MigratingBuiltinCost, + }, solana_instruction::Instruction, solana_keypair::Keypair, solana_message::Message, @@ -514,58 +516,63 @@ mod test { #[test] fn test_builtin_program_migration() { - let tx = build_sanitized_transaction(&[ - Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), - solana_stake_interface::instruction::delegate_stake( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - ), - ]); - let feature_id_index = get_migration_feature_position( - &agave_feature_set::migrate_stake_program_to_core_bpf::id(), - ); - let mut expected_details = ComputeBudgetInstructionDetails { - num_non_compute_budget_instructions: Saturating(2), - num_non_builtin_instructions: Saturating(1), - ..ComputeBudgetInstructionDetails::default() - }; - expected_details - .migrating_builtin_feature_counters - .migrating_builtin[feature_id_index] = Saturating(1); - let expected_details = Ok(expected_details); - let details = - ComputeBudgetInstructionDetails::try_from(SVMMessage::program_instructions_iter(&tx)); - assert_eq!(details, expected_details); - let details = details.unwrap(); - - let mut feature_set = FeatureSet::default(); - - // migrate_stake_program_to_core_bpf: false; - // expect: 1 bpf ix, 1 non-compute-budget builtin, cu-limit = 200K + 3K - let cu_limits = details.sanitize_and_convert_to_compute_budget_limits(&feature_set); - assert_eq!( - cu_limits, - Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - + 
MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT, - ..ComputeBudgetLimits::default() - }) - ); - - // migrate_stake_program_to_core_bpf: true; - // expect: 2 bpf ix, cu-limit = 2 * 200K - feature_set.activate( - &agave_feature_set::migrate_stake_program_to_core_bpf::id(), - 0, - ); - let cu_limits = details.sanitize_and_convert_to_compute_budget_limits(&feature_set); - assert_eq!( - cu_limits, - Ok(ComputeBudgetLimits { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT * 2, - ..ComputeBudgetLimits::default() - }) - ); + for (program_id, builtin_cost) in MIGRATING_BUILTINS_COSTS { + let BuiltinCost::Migrating(MigratingBuiltinCost { + core_bpf_migration_feature: feature_id, + position, + }) = builtin_cost + else { + panic!("MIGRATING_BUILTINS_COSTS must only contain BuiltinCost::Migrating"); + }; + + assert_eq!(get_migration_feature_id(*position), feature_id); + assert_eq!(get_migration_feature_position(feature_id), *position); + + let tx = build_sanitized_transaction(&[ + Instruction::new_with_bincode(Pubkey::new_unique(), &(), vec![]), + Instruction::new_with_bincode(*program_id, &(), vec![]), + ]); + + let mut expected_details = ComputeBudgetInstructionDetails { + num_non_compute_budget_instructions: Saturating(2), + num_non_builtin_instructions: Saturating(1), + ..ComputeBudgetInstructionDetails::default() + }; + expected_details + .migrating_builtin_feature_counters + .migrating_builtin[*position] = Saturating(1); + let expected_details = Ok(expected_details); + let details = ComputeBudgetInstructionDetails::try_from( + SVMMessage::program_instructions_iter(&tx), + ); + assert_eq!(details, expected_details); + let details = details.unwrap(); + + let mut feature_set = FeatureSet::default(); + + // migrate bpf program: false; + // expect: 1 bpf ix, 1 non-compute-budget builtin, cu-limit = 200K + 3K + let cu_limits = details.sanitize_and_convert_to_compute_budget_limits(&feature_set); + assert_eq!( + cu_limits, + Ok(ComputeBudgetLimits { + compute_unit_limit: 
DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + + MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT, + ..ComputeBudgetLimits::default() + }) + ); + + // migrate bpf program: true; + // expect: 2 bpf ix, cu-limit = 2 * 200K + feature_set.activate(feature_id, 0); + let cu_limits = details.sanitize_and_convert_to_compute_budget_limits(&feature_set); + assert_eq!( + cu_limits, + Ok(ComputeBudgetLimits { + compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT * 2, + ..ComputeBudgetLimits::default() + }) + ); + } } } diff --git a/compute-budget/src/compute_budget.rs b/compute-budget/src/compute_budget.rs index 87bbc9f81affe2..06904f849f5e6c 100644 --- a/compute-budget/src/compute_budget.rs +++ b/compute-budget/src/compute_budget.rs @@ -40,8 +40,6 @@ pub struct ComputeBudget { pub stack_frame_size: usize, /// Number of compute units consumed by logging a `Pubkey` pub log_pubkey_units: u64, - /// Maximum cross-program invocation instruction size - pub max_cpi_instruction_size: usize, /// Number of account data bytes per compute unit charged during a cross-program invocation pub cpi_bytes_per_unit: u64, /// Base number of compute units consumed to get a sysvar @@ -130,9 +128,9 @@ impl Default for ComputeBudget { } impl ComputeBudget { - pub fn new_with_defaults(simd_0296_active: bool) -> Self { + pub fn new_with_defaults(simd_0268_active: bool) -> Self { Self::from_budget_and_cost( - &SVMTransactionExecutionBudget::new_with_defaults(simd_0296_active), + &SVMTransactionExecutionBudget::new_with_defaults(simd_0268_active), &SVMTransactionExecutionCost::default(), ) } @@ -154,7 +152,6 @@ impl ComputeBudget { max_call_depth: budget.max_call_depth, stack_frame_size: budget.stack_frame_size, log_pubkey_units: cost.log_pubkey_units, - max_cpi_instruction_size: budget.max_cpi_instruction_size, cpi_bytes_per_unit: cost.cpi_bytes_per_unit, sysvar_base_cost: cost.sysvar_base_cost, secp256k1_recover_cost: cost.secp256k1_recover_cost, @@ -199,7 +196,6 @@ impl ComputeBudget { sha256_max_slices: 
self.sha256_max_slices, max_call_depth: self.max_call_depth, stack_frame_size: self.stack_frame_size, - max_cpi_instruction_size: self.max_cpi_instruction_size, heap_size: self.heap_size, } } diff --git a/compute-budget/src/compute_budget_limits.rs b/compute-budget/src/compute_budget_limits.rs index 970a9e11cfbf9c..23c3ddd6dcdc47 100644 --- a/compute-budget/src/compute_budget_limits.rs +++ b/compute-budget/src/compute_budget_limits.rs @@ -40,13 +40,13 @@ impl ComputeBudgetLimits { &self, loaded_accounts_data_size_limit: NonZeroU32, fee_details: FeeDetails, - simd_0296_active: bool, + simd_0268_active: bool, ) -> SVMTransactionExecutionAndFeeBudgetLimits { SVMTransactionExecutionAndFeeBudgetLimits { budget: SVMTransactionExecutionBudget { compute_unit_limit: u64::from(self.compute_unit_limit), heap_size: self.updated_heap_bytes, - ..SVMTransactionExecutionBudget::new_with_defaults(simd_0296_active) + ..SVMTransactionExecutionBudget::new_with_defaults(simd_0268_active) }, loaded_accounts_data_size_limit, fee_details, diff --git a/connection-cache/src/client_connection.rs b/connection-cache/src/client_connection.rs index 1469910633e703..00edf341e5612b 100644 --- a/connection-cache/src/client_connection.rs +++ b/connection-cache/src/client_connection.rs @@ -1,7 +1,10 @@ use { solana_metrics::MovingStat, solana_transaction_error::TransportResult, - std::{net::SocketAddr, sync::atomic::AtomicU64}, + std::{ + net::SocketAddr, + sync::{atomic::AtomicU64, Arc}, + }, }; #[derive(Default)] @@ -35,7 +38,7 @@ pub trait ClientConnection: Sync + Send { fn send_data(&self, buffer: &[u8]) -> TransportResult<()>; - fn send_data_async(&self, buffer: Vec) -> TransportResult<()>; + fn send_data_async(&self, buffer: Arc>) -> TransportResult<()>; fn send_data_batch(&self, buffers: &[Vec]) -> TransportResult<()>; diff --git a/connection-cache/src/connection_cache.rs b/connection-cache/src/connection_cache.rs index fca357e3bf3c29..69ac8feeee54c1 100644 --- 
a/connection-cache/src/connection_cache.rs +++ b/connection-cache/src/connection_cache.rs @@ -514,9 +514,7 @@ mod tests { async_trait::async_trait, rand::{Rng, SeedableRng}, rand_chacha::ChaChaRng, - solana_net_utils::sockets::{ - bind_with_any_port_with_config, SocketConfiguration as SocketConfig, - }, + solana_net_utils::sockets::bind_to_localhost_unique, solana_transaction_error::TransportResult, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, @@ -573,13 +571,7 @@ mod tests { impl Default for MockUdpConfig { fn default() -> Self { Self { - udp_socket: Arc::new( - bind_with_any_port_with_config( - IpAddr::V4(Ipv4Addr::UNSPECIFIED), - SocketConfig::default(), - ) - .expect("Unable to bind to UDP socket"), - ), + udp_socket: Arc::new(bind_to_localhost_unique().unwrap()), } } } @@ -588,11 +580,7 @@ mod tests { fn new() -> Result { Ok(Self { udp_socket: Arc::new( - bind_with_any_port_with_config( - IpAddr::V4(Ipv4Addr::UNSPECIFIED), - SocketConfig::default(), - ) - .map_err(Into::::into)?, + bind_to_localhost_unique().map_err(Into::::into)?, ), }) } @@ -662,7 +650,7 @@ mod tests { fn send_data(&self, _buffer: &[u8]) -> TransportResult<()> { unimplemented!() } - fn send_data_async(&self, _data: Vec) -> TransportResult<()> { + fn send_data_async(&self, _data: Arc>) -> TransportResult<()> { unimplemented!() } fn send_data_batch(&self, _buffers: &[Vec]) -> TransportResult<()> { diff --git a/core/Cargo.toml b/core/Cargo.toml index 5ababa363c0539..b5c77836b7defa 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "solana-core" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-core" readme = "../README.md" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -45,6 +45,7 @@ agave-banking-stage-ingress-types = { workspace = true } agave-feature-set = { 
workspace = true } agave-transaction-view = { workspace = true } agave-verified-packet-receiver = { workspace = true } +agave-votor = { workspace = true, features = ["agave-unstable-api"] } ahash = { workspace = true } anyhow = { workspace = true } arrayvec = { workspace = true } @@ -53,13 +54,13 @@ async-trait = { workspace = true } base64 = { workspace = true } bincode = { workspace = true } bs58 = { workspace = true } +bytemuck = { workspace = true } bytes = { workspace = true } chrono = { workspace = true, features = ["default", "serde"] } conditional-mod = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } derive_more = { workspace = true } -etcd-client = { workspace = true, features = ["tls"] } futures = { workspace = true } histogram = { workspace = true } itertools = { workspace = true } @@ -88,6 +89,7 @@ solana-bloom = { workspace = true } solana-builtins-default-costs = { workspace = true } solana-client = { workspace = true } solana-clock = { workspace = true } +solana-cluster-type = { workspace = true } solana-compute-budget = { workspace = true } solana-compute-budget-instruction = { workspace = true } solana-compute-budget-interface = { workspace = true } @@ -106,18 +108,18 @@ solana-frozen-abi-macro = { workspace = true, optional = true, features = [ ] } solana-genesis-config = { workspace = true } solana-geyser-plugin-manager = { workspace = true } -solana-gossip = { workspace = true } +solana-gossip = { workspace = true, features = ["agave-unstable-api"] } solana-hard-forks = { workspace = true } solana-hash = { workspace = true } solana-instruction = { workspace = true } solana-keypair = { workspace = true } -solana-ledger = { workspace = true } +solana-ledger = { workspace = true, features = ["agave-unstable-api"] } solana-loader-v3-interface = { workspace = true } solana-measure = { workspace = true } solana-message = { workspace = true } solana-metrics = { workspace = 
true } solana-native-token = { workspace = true } -solana-net-utils = { workspace = true } +solana-net-utils = { workspace = true, features = ["agave-unstable-api"] } solana-nonce = { workspace = true } solana-nonce-account = { workspace = true } solana-packet = { workspace = true } @@ -145,19 +147,19 @@ solana-slot-hashes = { workspace = true } solana-slot-history = { workspace = true } solana-streamer = { workspace = true } solana-svm = { workspace = true } +solana-svm-timings = { workspace = true } solana-svm-transaction = { workspace = true } solana-system-interface = { workspace = true } solana-system-transaction = { workspace = true } solana-sysvar = { workspace = true } solana-time-utils = { workspace = true } -solana-timings = { workspace = true } solana-tls-utils = { workspace = true } solana-tpu-client = { workspace = true } solana-tpu-client-next = { workspace = true } solana-transaction = { workspace = true } solana-transaction-error = { workspace = true } solana-transaction-status = { workspace = true } -solana-turbine = { workspace = true } +solana-turbine = { workspace = true, features = ["agave-unstable-api"] } solana-unified-scheduler-pool = { workspace = true } solana-validator-exit = { workspace = true } solana-version = { workspace = true } @@ -187,10 +189,11 @@ fs_extra = { workspace = true } serde_json = { workspace = true } serial_test = { workspace = true } solana-account = { workspace = true, features = ["dev-context-only-utils"] } -# See order-crates-for-publishing.py for using this unusual `path = "."` solana-bpf-loader-program = { workspace = true } +solana-client = { workspace = true, features = ["dev-context-only-utils"] } solana-compute-budget-interface = { workspace = true } solana-compute-budget-program = { workspace = true } +# See order-crates-for-publishing.py for using this unusual `path = "."` solana-core = { path = ".", features = ["dev-context-only-utils"] } solana-cost-model = { workspace = true, features = 
["dev-context-only-utils"] } solana-keypair = { workspace = true } @@ -198,6 +201,7 @@ solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-net-utils = { workspace = true, features = ["dev-context-only-utils"] } solana-poh = { workspace = true, features = ["dev-context-only-utils"] } +solana-program-binaries = { workspace = true } solana-program-runtime = { workspace = true, features = ["metrics"] } solana-rpc = { workspace = true, features = ["dev-context-only-utils"] } solana-stake-program = { workspace = true } @@ -211,9 +215,6 @@ spl-memo-interface = { workspace = true } static_assertions = { workspace = true } test-case = { workspace = true } -[badges] -codecov = { repository = "solana-labs/solana", branch = "master", service = "github" } - [[bench]] name = "banking_stage" diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 49fc40ae64c304..e8e95ed7670d32 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -140,11 +140,11 @@ fn bench_banking( transaction_struct: TransactionStructure, ) { solana_logger::setup(); - let num_threads = BankingStage::num_threads() as usize; + let num_threads = BankingStage::default_num_workers(); // a multiple of packet chunk duplicates to avoid races const CHUNKS: usize = 8; const PACKETS_PER_BATCH: usize = 192; - let txes = PACKETS_PER_BATCH * num_threads * CHUNKS; + let txes = PACKETS_PER_BATCH * num_threads.get() * CHUNKS; let mint_total = 1_000_000_000_000; let GenesisConfigInfo { mut genesis_config, @@ -177,7 +177,7 @@ fn bench_banking( .unwrap() .set_limits(u64::MAX, u64::MAX, u64::MAX); - debug!("threads: {} txs: {}", num_threads, txes); + debug!("threads: {num_threads} txs: {txes}"); let transactions = match tx_type { TransactionType::Accounts | TransactionType::AccountsAndVotes => { @@ -232,22 +232,23 @@ fn bench_banking( let blockstore = Arc::new( 
Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"), ); - let (exit, poh_recorder, transaction_recorder, poh_service, signal_receiver) = + let (exit, poh_recorder, _poh_controller, transaction_recorder, poh_service, signal_receiver) = create_test_recorder(bank.clone(), blockstore, None, None); let (s, _r) = unbounded(); - let _banking_stage = BankingStage::new( + let _banking_stage = BankingStage::new_num_threads( block_production_method, transaction_struct, - &poh_recorder, + poh_recorder.clone(), transaction_recorder, non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, + num_threads, None, s, None, bank_forks, - &Arc::new(PrioritizationFeeCache::new(0u64)), + Arc::new(PrioritizationFeeCache::new(0u64)), ); let chunk_len = verified.len() / CHUNKS; diff --git a/core/benches/consumer.rs b/core/benches/consumer.rs index bb87152c927c59..0ac94b9cde39ae 100644 --- a/core/benches/consumer.rs +++ b/core/benches/consumer.rs @@ -121,7 +121,7 @@ fn setup() -> BenchFrame { let blockstore = Arc::new( Blockstore::open(ledger_path.path()).expect("Expected to be able to open database ledger"), ); - let (exit, _poh_recorder, transaction_recorder, poh_service, signal_receiver) = + let (exit, _poh_recorder, _poh_controller, transaction_recorder, poh_service, signal_receiver) = create_test_recorder(bank.clone(), blockstore, None, None); BenchFrame { diff --git a/core/benches/receive_and_buffer.rs b/core/benches/receive_and_buffer.rs index 1ada4ef7564a47..24cb7240a5be28 100644 --- a/core/benches/receive_and_buffer.rs +++ b/core/benches/receive_and_buffer.rs @@ -6,7 +6,6 @@ use { receive_and_buffer::{ ReceiveAndBuffer, SanitizedTransactionReceiveAndBuffer, TransactionViewReceiveAndBuffer, }, - scheduler_metrics::{SchedulerCountMetrics, SchedulerTimingMetrics}, transaction_state_container::StateContainer, }, std::time::{Duration, Instant}, @@ -41,8 +40,6 @@ fn bench_receive_and_buffer( let (bank, bank_forks) = 
Bank::new_for_benches(&genesis_config).wrap_with_bank_forks_for_tests(); - let bank_start = BankStart { - working_bank: bank.clone(), - bank_creation_time: Arc::new(Instant::now()), - }; let (sender, receiver) = unbounded(); let receive_and_buffer = T::create(receiver, bank_forks); - let decision = BufferedPacketsDecision::Consume(bank_start); + let decision = BufferedPacketsDecision::Consume(bank.clone()); let txs = generate_transactions( num_txs, diff --git a/core/benches/scheduler.rs b/core/benches/scheduler.rs index d003f3b21dda55..8fb3ba5df710fd 100644 --- a/core/benches/scheduler.rs +++ b/core/benches/scheduler.rs @@ -15,7 +15,6 @@ use { TransactionViewReceiveAndBuffer, }, scheduler::{PreLockFilterAction, Scheduler}, - scheduler_metrics::{SchedulerCountMetrics, SchedulerTimingMetrics}, transaction_state::TransactionState, transaction_state_container::StateContainer, }, @@ -201,15 +200,10 @@ fn timing_scheduler>( if sender.send(txs.clone()).is_err() { panic!("Unexpectedly dropped receiver!"); } - let mut count_metrics = SchedulerCountMetrics::default(); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let res = receive_and_buffer.receive_and_buffer_packets( - &mut container, - &mut timing_metrics, - &mut count_metrics, - &decision, - ); - assert_eq!(res.unwrap(), num_txs); + let res = receive_and_buffer + .receive_and_buffer_packets(&mut container, &decision) + .unwrap(); + assert_eq!(res.num_received, num_txs); assert!(!container.is_empty()); let elapsed = { diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs index e0e7acd0d3588d..24c23186d74fba 100644 --- a/core/benches/shredder.rs +++ b/core/benches/shredder.rs @@ -10,7 +10,8 @@ use { solana_keypair::Keypair, solana_ledger::shred::{ get_data_shred_bytes_per_batch_typical, max_entries_per_n_shred, max_ticks_per_n_shreds, - recover, ProcessShredsStats, ReedSolomonCache, Shred, Shredder, DATA_SHREDS_PER_FEC_BLOCK, + recover, ProcessShredsStats, ReedSolomonCache, Shred, Shredder, + 
CODING_SHREDS_PER_FEC_BLOCK, DATA_SHREDS_PER_FEC_BLOCK, }, solana_perf::test_tx, test::{black_box, Bencher}, @@ -42,7 +43,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) { let num_ticks = max_ticks_per_n_shreds(1, Some(SHRED_SIZE_TYPICAL)) * num_shreds as u64; let entries = create_ticks(num_ticks, 0, Hash::default()); let reed_solomon_cache = ReedSolomonCache::default(); - let chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let chained_merkle_root = Hash::new_from_array(rand::thread_rng().gen()); bencher.iter(|| { let shredder = Shredder::new(1, 0, 0, 0).unwrap(); shredder.entries_to_merkle_shreds_for_tests( @@ -70,7 +71,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) { Some(shred_size), ); let entries = make_large_unchained_entries(txs_per_entry, num_entries); - let chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let chained_merkle_root = Hash::new_from_array(rand::thread_rng().gen()); let reed_solomon_cache = ReedSolomonCache::default(); // 1Mb bencher.iter(|| { @@ -97,7 +98,7 @@ fn bench_deshredder(bencher: &mut Bencher) { let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64; let entries = create_ticks(num_ticks, 0, Hash::default()); let shredder = Shredder::new(1, 0, 0, 0).unwrap(); - let chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let chained_merkle_root = Hash::new_from_array(rand::thread_rng().gen()); let (data_shreds, _) = shredder.entries_to_merkle_shreds_for_tests( &kp, &entries, @@ -119,7 +120,7 @@ fn bench_deshredder(bencher: &mut Bencher) { fn bench_deserialize_hdr(bencher: &mut Bencher) { let keypair = Keypair::new(); let shredder = Shredder::new(2, 1, 0, 0).unwrap(); - let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let merkle_root = Hash::new_from_array(rand::thread_rng().gen()); let mut stats = ProcessShredsStats::default(); let reed_solomon_cache = ReedSolomonCache::default(); let mut 
shreds = shredder @@ -154,7 +155,7 @@ fn bench_shredder_coding(bencher: &mut Bencher) { let entries = make_entries(); let shredder = Shredder::new(1, 0, 0, 0).unwrap(); let reed_solomon_cache = ReedSolomonCache::default(); - let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let merkle_root = Hash::new_from_array(rand::thread_rng().gen()); bencher.iter(|| { let result: Vec<_> = shredder .make_merkle_shreds_from_entries( @@ -177,8 +178,8 @@ fn bench_shredder_decoding(bencher: &mut Bencher) { let entries = make_entries(); let shredder = Shredder::new(1, 0, 0, 0).unwrap(); let reed_solomon_cache = ReedSolomonCache::default(); - let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); - let (_data_shreds, coding_shreds): (Vec<_>, Vec<_>) = shredder + let merkle_root = Hash::new_from_array(rand::thread_rng().gen()); + let (_data_shreds, mut coding_shreds): (Vec<_>, Vec<_>) = shredder .make_merkle_shreds_from_entries( &Keypair::new(), &entries, @@ -190,6 +191,7 @@ fn bench_shredder_decoding(bencher: &mut Bencher) { &mut ProcessShredsStats::default(), ) .partition(Shred::is_data); + coding_shreds.truncate(CODING_SHREDS_PER_FEC_BLOCK); bencher.iter(|| { for shred in recover(coding_shreds.clone(), &reed_solomon_cache).unwrap() { diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs index 38198c40211d27..7069b47a7ff6db 100644 --- a/core/benches/sigverify_stage.rs +++ b/core/benches/sigverify_stage.rs @@ -53,7 +53,7 @@ fn run_bench_packet_discard(num_ips: usize, bencher: &mut Bencher) { p.meta_mut().addr = ips[ip_index]; } } - info!("total packets: {}", total); + info!("total packets: {total}"); bencher.iter(move || { SigVerifyStage::discard_excess_packets(&mut batches, 10_000); @@ -175,7 +175,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher, use_same_tx: bool) { } let mut received = 0; let expected = if use_same_tx { 1 } else { sent_len }; - trace!("sent: {}, expected: {}", sent_len, expected); + trace!("sent: 
{sent_len}, expected: {expected}"); loop { if let Ok(verifieds) = verified_r.recv_timeout(Duration::from_millis(10)) { received += verifieds.iter().map(|batch| batch.len()).sum::(); @@ -185,7 +185,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher, use_same_tx: bool) { } } } - trace!("received: {}", received); + trace!("received: {received}"); }); // This will wait for all packets to make it through sigverify. stage.join().unwrap(); diff --git a/core/src/admin_rpc_post_init.rs b/core/src/admin_rpc_post_init.rs index 61d1f1e5123a62..ae0b1dd950181a 100644 --- a/core/src/admin_rpc_post_init.rs +++ b/core/src/admin_rpc_post_init.rs @@ -1,13 +1,13 @@ use { crate::{ + banking_stage::BankingStage, cluster_slots_service::cluster_slots::ClusterSlots, repair::{outstanding_requests::OutstandingRequests, serve_repair::ShredRepairType}, }, - solana_gossip::cluster_info::ClusterInfo, + solana_gossip::{cluster_info::ClusterInfo, node::NodeMultihoming}, solana_pubkey::Pubkey, solana_quic_definitions::NotifyKeyUpdate, solana_runtime::bank_forks::BankForks, - solana_streamer::atomic_udp_socket::AtomicUdpSocket, std::{ collections::{HashMap, HashSet}, net::UdpSocket, @@ -79,5 +79,6 @@ pub struct AdminRpcRequestMetadataPostInit { pub repair_socket: Arc, pub outstanding_repair_requests: Arc>>, pub cluster_slots: Arc, - pub gossip_socket: Option, + pub node: Option>, + pub banking_stage: Arc>>, } diff --git a/core/src/banking_simulation.rs b/core/src/banking_simulation.rs index 4dc1a308f68692..80a8ee2bd37fe8 100644 --- a/core/src/banking_simulation.rs +++ b/core/src/banking_simulation.rs @@ -26,6 +26,7 @@ use { }, solana_net_utils::sockets::{bind_in_range_with_config, SocketConfiguration}, solana_poh::{ + poh_controller::PohController, poh_recorder::{PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, poh_service::{PohService, DEFAULT_HASHES_PER_BATCH, DEFAULT_PINNED_CPU_CORE}, transaction_recorder::TransactionRecorder, @@ -410,6 +411,7 @@ struct SimulatorLoop { freeze_time_by_slot: 
FreezeTimeBySlot, base_event_time: SystemTime, poh_recorder: Arc>, + poh_controller: PohController, simulated_leader: Pubkey, bank_forks: Arc>, blockstore: Arc, @@ -430,7 +432,7 @@ impl SimulatorLoop { } fn start( - self, + mut self, base_simulation_time: SystemTime, sender_thread: EventSenderThread, ) -> (EventSenderThread, Sender) { @@ -451,10 +453,9 @@ impl SimulatorLoop { GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS, ); debug!("{next_leader_slot:?}"); - self.poh_recorder - .write() - .unwrap() - .reset(bank.clone_without_scheduler(), next_leader_slot); + self.poh_controller + .reset_sync(bank.clone_without_scheduler(), next_leader_slot) + .unwrap(); info!("Bank::new_from_parent()!"); logger.log_jitter(&bank); @@ -497,7 +498,7 @@ impl SimulatorLoop { self.retransmit_slots_sender.send(bank.slot()).unwrap(); update_bank_forks_and_poh_recorder_for_new_tpu_bank( &self.bank_forks, - &self.poh_recorder, + &mut self.poh_controller, new_bank, ); (bank, bank_created) = ( @@ -742,6 +743,7 @@ impl BankingSimulator { let poh_recorder = Arc::new(RwLock::new(poh_recorder)); let (record_sender, record_receiver) = unbounded(); let transaction_recorder = TransactionRecorder::new(record_sender, exit.clone()); + let (poh_controller, poh_service_message_receiver) = PohController::new(); let poh_service = PohService::new( poh_recorder.clone(), &genesis_config.poh_config, @@ -750,6 +752,7 @@ impl BankingSimulator { DEFAULT_PINNED_CPU_CORE, DEFAULT_HASHES_PER_BATCH, record_receiver, + poh_service_message_receiver, ); // Enable BankingTracer to approximate the real environment as close as possible because @@ -823,17 +826,17 @@ impl BankingSimulator { let banking_stage = BankingStage::new_num_threads( block_production_method.clone(), transaction_struct.clone(), - &poh_recorder, + poh_recorder.clone(), transaction_recorder, non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, - BankingStage::num_threads(), + BankingStage::default_num_workers(), None, replay_vote_sender, None, 
bank_forks.clone(), - prioritization_fee_cache, + prioritization_fee_cache.clone(), ); let (&_slot, &raw_base_event_time) = freeze_time_by_slot @@ -882,6 +885,7 @@ impl BankingSimulator { freeze_time_by_slot, base_event_time, poh_recorder, + poh_controller, simulated_leader, bank_forks, blockstore, diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index c8c43781141587..9d88efd1409aa9 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -27,7 +27,10 @@ use { solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfoQuery}, solana_ledger::blockstore_processor::TransactionStatusSender, solana_perf::packet::PACKETS_PER_BATCH, - solana_poh::{poh_recorder::PohRecorder, transaction_recorder::TransactionRecorder}, + solana_poh::{ + poh_controller::PohController, poh_recorder::PohRecorder, + transaction_recorder::TransactionRecorder, + }, solana_pubkey::Pubkey, solana_runtime::{ bank::Bank, bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache, @@ -35,11 +38,10 @@ use { }, solana_time_utils::AtomicInterval, std::{ - cmp, env, - num::Saturating, + num::{NonZeroUsize, Saturating}, ops::Deref, sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, RwLock, }, thread::{self, Builder, JoinHandle}, @@ -80,16 +82,14 @@ conditional_vis_mod!( ); conditional_vis_mod!(unified_scheduler, feature = "dev-context-only-utils", pub, pub(crate)); -// Fixed thread size seems to be fastest on GCP setup -pub const NUM_THREADS: u32 = 6; +/// The maximum number of worker threads that can be spawned by banking stage. +/// 64 because `ThreadAwareAccountLocks` uses a `u64` as a bitmask to +/// track thread placement. 
+const MAX_NUM_WORKERS: NonZeroUsize = NonZeroUsize::new(64).unwrap(); +const DEFAULT_NUM_WORKERS: NonZeroUsize = NonZeroUsize::new(4).unwrap(); #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const TOTAL_BUFFERED_PACKETS: usize = 100_000; - -const NUM_VOTE_PROCESSING_THREADS: u32 = 2; -const MIN_THREADS_BANKING: u32 = 1; -const MIN_TOTAL_THREADS: u32 = NUM_VOTE_PROCESSING_THREADS + MIN_THREADS_BANKING; - const SLOT_BOUNDARY_CHECK_PERIOD: Duration = Duration::from_millis(10); #[derive(Debug, Default)] @@ -338,9 +338,10 @@ pub struct BatchedTransactionErrorDetails { pub batched_dropped_txs_per_account_data_total_limit_count: Saturating, } -/// Stores the stage's thread handle and output receiver. pub struct BankingStage { - bank_thread_hdls: Vec>, + // Only None during final join of BankingStage. + context: Option, + thread_hdls: Vec>, } pub trait LikeClusterInfo: Send + Sync + 'static + Clone { @@ -360,200 +361,173 @@ impl LikeClusterInfo for Arc { } impl BankingStage { - /// Create the stage using `bank`. Exit when `verified_receiver` is dropped. 
#[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new_num_threads( block_production_method: BlockProductionMethod, transaction_struct: TransactionStructure, - poh_recorder: &Arc>, + poh_recorder: Arc>, transaction_recorder: TransactionRecorder, non_vote_receiver: BankingPacketReceiver, tpu_vote_receiver: BankingPacketReceiver, gossip_vote_receiver: BankingPacketReceiver, + num_workers: NonZeroUsize, transaction_status_sender: Option, replay_vote_sender: ReplayVoteSender, log_messages_bytes_limit: Option, bank_forks: Arc>, - prioritization_fee_cache: &Arc, + prioritization_fee_cache: Arc, ) -> Self { - Self::new_num_threads( - block_production_method, - transaction_struct, - poh_recorder, - transaction_recorder, - non_vote_receiver, - tpu_vote_receiver, - gossip_vote_receiver, - Self::num_threads(), + let committer = Committer::new( transaction_status_sender, replay_vote_sender, - log_messages_bytes_limit, - bank_forks, prioritization_fee_cache, - ) - } + ); + + let context = BankingStageContext { + exit_signal: Arc::new(AtomicBool::new(false)), + tpu_vote_receiver, + gossip_vote_receiver, + non_vote_receiver, + transaction_recorder, + poh_recorder, + bank_forks, + committer, + log_messages_bytes_limit, + }; + // + 1 for vote worker + // + 1 for the scheduler thread + let mut thread_hdls = Vec::with_capacity(num_workers.get() + 2); + thread_hdls.push(Self::spawn_vote_worker(&context)); - #[allow(clippy::too_many_arguments)] - pub fn new_num_threads( - block_production_method: BlockProductionMethod, - transaction_struct: TransactionStructure, - poh_recorder: &Arc>, - transaction_recorder: TransactionRecorder, - non_vote_receiver: BankingPacketReceiver, - tpu_vote_receiver: BankingPacketReceiver, - gossip_vote_receiver: BankingPacketReceiver, - num_threads: u32, - transaction_status_sender: Option, - replay_vote_sender: ReplayVoteSender, - log_messages_bytes_limit: Option, - bank_forks: Arc>, - prioritization_fee_cache: &Arc, - ) -> Self { let 
use_greedy_scheduler = matches!( block_production_method, BlockProductionMethod::CentralSchedulerGreedy ); + Self::new_central_scheduler( + &mut thread_hdls, transaction_struct, use_greedy_scheduler, - poh_recorder, - transaction_recorder, - non_vote_receiver, - tpu_vote_receiver, - gossip_vote_receiver, - num_threads, - transaction_status_sender, - replay_vote_sender, - log_messages_bytes_limit, - bank_forks, - prioritization_fee_cache, - ) + num_workers, + &context, + ); + + Self { + context: Some(context), + thread_hdls, + } } - #[allow(clippy::too_many_arguments)] - pub fn new_central_scheduler( + pub fn spawn_threads( + &mut self, transaction_struct: TransactionStructure, - use_greedy_scheduler: bool, - poh_recorder: &Arc>, - transaction_recorder: TransactionRecorder, - non_vote_receiver: BankingPacketReceiver, - tpu_vote_receiver: BankingPacketReceiver, - gossip_vote_receiver: BankingPacketReceiver, - num_threads: u32, - transaction_status_sender: Option, - replay_vote_sender: ReplayVoteSender, - log_messages_bytes_limit: Option, - bank_forks: Arc>, - prioritization_fee_cache: &Arc, - ) -> Self { - assert!(num_threads >= MIN_TOTAL_THREADS); - let vote_storage = { - let bank = bank_forks.read().unwrap().working_bank(); - VoteStorage::new(&bank) - }; - - let decision_maker = DecisionMaker::new(poh_recorder.clone()); - let committer = Committer::new( - transaction_status_sender.clone(), - replay_vote_sender.clone(), - prioritization_fee_cache.clone(), - ); + block_production_method: BlockProductionMethod, + num_workers: NonZeroUsize, + ) -> thread::Result<()> { + if let Some(context) = self.context.as_ref() { + info!("Shutting down banking stage threads"); + context.exit_signal.store(true, Ordering::Relaxed); + for bank_thread_hdl in self.thread_hdls.drain(..) 
{ + bank_thread_hdl.join()?; + } - // + 1 for the central scheduler thread - let mut bank_thread_hdls = Vec::with_capacity(num_threads as usize + 1); + info!( + "Spawning new banking stage threads with block-production-method: \ + {block_production_method:?} transaction-structure: {transaction_struct:?} \ + num-workers: {num_workers}" + ); + context.exit_signal.store(false, Ordering::Relaxed); + self.thread_hdls.push(Self::spawn_vote_worker(context)); + Self::new_central_scheduler( + &mut self.thread_hdls, + transaction_struct, + matches!( + block_production_method, + BlockProductionMethod::CentralSchedulerGreedy + ), + num_workers, + context, + ) + } - // Spawn legacy voting thread - bank_thread_hdls.push(Self::spawn_vote_worker( - tpu_vote_receiver, - gossip_vote_receiver, - decision_maker.clone(), - bank_forks.clone(), - committer.clone(), - transaction_recorder.clone(), - log_messages_bytes_limit, - vote_storage, - )); + Ok(()) + } + fn new_central_scheduler( + non_vote_thread_hdls: &mut Vec>, + transaction_struct: TransactionStructure, + use_greedy_scheduler: bool, + num_workers: NonZeroUsize, + context: &BankingStageContext, + ) { match transaction_struct { TransactionStructure::Sdk => { let receive_and_buffer = SanitizedTransactionReceiveAndBuffer::new( - PacketDeserializer::new(non_vote_receiver), - bank_forks.clone(), + PacketDeserializer::new(context.non_vote_receiver.clone()), + context.bank_forks.clone(), ); Self::spawn_scheduler_and_workers( - &mut bank_thread_hdls, + non_vote_thread_hdls, receive_and_buffer, use_greedy_scheduler, - decision_maker, - committer, - poh_recorder, - transaction_recorder, - num_threads, - log_messages_bytes_limit, - bank_forks, - ); + num_workers, + context, + ) } TransactionStructure::View => { let receive_and_buffer = TransactionViewReceiveAndBuffer { - receiver: non_vote_receiver, - bank_forks: bank_forks.clone(), + receiver: context.non_vote_receiver.clone(), + bank_forks: context.bank_forks.clone(), }; 
Self::spawn_scheduler_and_workers( - &mut bank_thread_hdls, + non_vote_thread_hdls, receive_and_buffer, use_greedy_scheduler, - decision_maker, - committer, - poh_recorder, - transaction_recorder, - num_threads, - log_messages_bytes_limit, - bank_forks, - ); + num_workers, + context, + ) } } - - Self { bank_thread_hdls } } - #[allow(clippy::too_many_arguments)] fn spawn_scheduler_and_workers( - bank_thread_hdls: &mut Vec>, + non_vote_thread_hdls: &mut Vec>, receive_and_buffer: R, use_greedy_scheduler: bool, - decision_maker: DecisionMaker, - committer: Committer, - poh_recorder: &Arc>, - transaction_recorder: TransactionRecorder, - num_threads: u32, - log_messages_bytes_limit: Option, - bank_forks: Arc>, + num_workers: NonZeroUsize, + context: &BankingStageContext, ) { + assert!(num_workers <= BankingStage::max_num_workers()); + let num_workers = num_workers.get(); + + let exit = context.exit_signal.clone(); + // Create channels for communication between scheduler and workers - let num_workers = (num_threads).saturating_sub(NUM_VOTE_PROCESSING_THREADS); let (work_senders, work_receivers): (Vec>, Vec>) = (0..num_workers).map(|_| unbounded()).unzip(); let (finished_work_sender, finished_work_receiver) = unbounded(); // Spawn the worker threads - let mut worker_metrics = Vec::with_capacity(num_workers as usize); + let decision_maker = DecisionMaker::from(context.poh_recorder.read().unwrap().deref()); + let mut worker_metrics = Vec::with_capacity(num_workers); for (index, work_receiver) in work_receivers.into_iter().enumerate() { - let id = (index as u32).saturating_add(NUM_VOTE_PROCESSING_THREADS); + let id = index as u32; let consume_worker = ConsumeWorker::new( id, + exit.clone(), work_receiver, Consumer::new( - committer.clone(), - transaction_recorder.clone(), + context.committer.clone(), + context.transaction_recorder.clone(), QosService::new(id), - log_messages_bytes_limit, + context.log_messages_bytes_limit, ), finished_work_sender.clone(), - 
poh_recorder.read().unwrap().shared_working_bank(), + context.poh_recorder.read().unwrap().shared_working_bank(), ); worker_metrics.push(consume_worker.metrics_handle()); - bank_thread_hdls.push( + non_vote_thread_hdls.push( Builder::new() .name(format!("solCoWorker{id:02}")) .spawn(move || { @@ -568,12 +542,15 @@ impl BankingStage { // assignment without introducing `dyn`. macro_rules! spawn_scheduler { ($scheduler:ident) => { - bank_thread_hdls.push( + let exit = exit.clone(); + let bank_forks = context.bank_forks.clone(); + non_vote_thread_hdls.push( Builder::new() .name("solBnkTxSched".to_string()) .spawn(move || { let scheduler_controller = SchedulerController::new( - decision_maker.clone(), + exit, + decision_maker, receive_and_buffer, bank_forks, $scheduler, @@ -611,29 +588,25 @@ impl BankingStage { } } - fn spawn_vote_worker( - tpu_receiver: BankingPacketReceiver, - gossip_receiver: BankingPacketReceiver, - decision_maker: DecisionMaker, - bank_forks: Arc>, - committer: Committer, - transaction_recorder: TransactionRecorder, - log_messages_bytes_limit: Option, - vote_storage: VoteStorage, - ) -> JoinHandle<()> { - let tpu_receiver = PacketReceiver::new(tpu_receiver); - let gossip_receiver = PacketReceiver::new(gossip_receiver); + fn spawn_vote_worker(context: &BankingStageContext) -> JoinHandle<()> { + let vote_storage = VoteStorage::new(&context.bank_forks.read().unwrap().working_bank()); + let tpu_receiver = PacketReceiver::new(context.tpu_vote_receiver.clone()); + let gossip_receiver = PacketReceiver::new(context.gossip_vote_receiver.clone()); let consumer = Consumer::new( - committer, - transaction_recorder, + context.committer.clone(), + context.transaction_recorder.clone(), QosService::new(0), - log_messages_bytes_limit, + context.log_messages_bytes_limit, ); + let decision_maker = DecisionMaker::from(context.poh_recorder.read().unwrap().deref()); + let exit_signal = context.exit_signal.clone(); + let bank_forks = context.bank_forks.clone(); 
Builder::new() .name("solBanknStgVote".to_string()) .spawn(move || { VoteWorker::new( + exit_signal, decision_maker, tpu_receiver, gossip_receiver, @@ -646,31 +619,51 @@ impl BankingStage { .unwrap() } - pub fn num_threads() -> u32 { - cmp::max( - env::var("SOLANA_BANKING_THREADS") - .map(|x| x.parse().unwrap_or(NUM_THREADS)) - .unwrap_or(NUM_THREADS), - MIN_TOTAL_THREADS, - ) + pub fn default_num_workers() -> NonZeroUsize { + DEFAULT_NUM_WORKERS + } + + pub fn max_num_workers() -> NonZeroUsize { + MAX_NUM_WORKERS } - pub fn join(self) -> thread::Result<()> { - for bank_thread_hdl in self.bank_thread_hdls { + pub fn join(mut self) -> thread::Result<()> { + self.context + .take() + .expect("non-vote context must be Some") + .exit_signal + .store(true, Ordering::Relaxed); + for bank_thread_hdl in self.thread_hdls { bank_thread_hdl.join()?; } Ok(()) } } +// Context for spawning threads in the banking stage. +#[derive(Clone)] +struct BankingStageContext { + exit_signal: Arc, + tpu_vote_receiver: BankingPacketReceiver, + gossip_vote_receiver: BankingPacketReceiver, + non_vote_receiver: BankingPacketReceiver, + transaction_recorder: TransactionRecorder, + poh_recorder: Arc>, + bank_forks: Arc>, + committer: Committer, + log_messages_bytes_limit: Option, +} + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn update_bank_forks_and_poh_recorder_for_new_tpu_bank( bank_forks: &RwLock, - poh_recorder: &RwLock, + poh_controller: &mut PohController, tpu_bank: Bank, ) { let tpu_bank = bank_forks.write().unwrap().insert(tpu_bank); - poh_recorder.write().unwrap().set_bank(tpu_bank); + if poh_controller.set_bank(tpu_bank).is_err() { + warn!("Failed to set poh bank, poh service is disconnected"); + } } #[cfg(test)] @@ -681,7 +674,7 @@ mod tests { agave_banking_stage_ingress_types::BankingPacketBatch, crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, - solana_entry::entry::{self, Entry, EntrySlice}, + solana_entry::entry::{self, EntrySlice}, 
solana_hash::Hash, solana_keypair::Keypair, solana_ledger::{ @@ -742,23 +735,30 @@ mod tests { Blockstore::open(ledger_path.path()) .expect("Expected to be able to open database ledger"), ); - let (exit, poh_recorder, transaction_recorder, poh_service, _entry_receiever) = - create_test_recorder(bank, blockstore, None, None); + let ( + exit, + poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _entry_receiever, + ) = create_test_recorder(bank, blockstore, None, None); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); - let banking_stage = BankingStage::new( + let banking_stage = BankingStage::new_num_threads( BlockProductionMethod::CentralScheduler, transaction_struct, - &poh_recorder, + poh_recorder.clone(), transaction_recorder, non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, + DEFAULT_NUM_WORKERS, None, replay_vote_sender, None, bank_forks, - &Arc::new(PrioritizationFeeCache::new(0u64)), + Arc::new(PrioritizationFeeCache::new(0u64)), ); drop(non_vote_sender); drop(tpu_vote_sender); @@ -797,23 +797,30 @@ mod tests { target_tick_count: Some(bank.max_tick_height() + num_extra_ticks), ..PohConfig::default() }; - let (exit, poh_recorder, transaction_recorder, poh_service, entry_receiver) = - create_test_recorder(bank.clone(), blockstore, Some(poh_config), None); + let ( + exit, + poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + entry_receiver, + ) = create_test_recorder(bank.clone(), blockstore, Some(poh_config), None); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); - let banking_stage = BankingStage::new( + let banking_stage = BankingStage::new_num_threads( BlockProductionMethod::CentralScheduler, transaction_struct, - &poh_recorder, + poh_recorder.clone(), transaction_recorder, non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, + DEFAULT_NUM_WORKERS, None, replay_vote_sender, None, bank_forks, - &Arc::new(PrioritizationFeeCache::new(0u64)), + 
Arc::new(PrioritizationFeeCache::new(0u64)), ); trace!("sending bank"); drop(non_vote_sender); @@ -822,6 +829,7 @@ mod tests { exit.store(true, Ordering::Relaxed); poh_service.join().unwrap(); drop(poh_recorder); + banking_stage.join().unwrap(); trace!("getting entries"); let entries: Vec<_> = entry_receiver @@ -832,7 +840,6 @@ mod tests { assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize); assert!(entries.verify(&start_hash, &entry::thread_pool_for_tests())); assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash()); - banking_stage.join().unwrap(); } fn test_banking_stage_entries_only( @@ -861,33 +868,35 @@ mod tests { Blockstore::open(ledger_path.path()) .expect("Expected to be able to open database ledger"), ); - let (exit, poh_recorder, transaction_recorder, poh_service, entry_receiver) = - create_test_recorder(bank.clone(), blockstore, None, None); + let ( + exit, + poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + entry_receiver, + ) = create_test_recorder(bank.clone(), blockstore, None, None); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); - let banking_stage = BankingStage::new( + let banking_stage = BankingStage::new_num_threads( block_production_method, transaction_struct, - &poh_recorder, + poh_recorder.clone(), transaction_recorder, non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, + DEFAULT_NUM_WORKERS, None, replay_vote_sender, None, bank_forks.clone(), // keep a local-copy of bank-forks so worker threads do not lose weak access to bank-forks - &Arc::new(PrioritizationFeeCache::new(0u64)), + Arc::new(PrioritizationFeeCache::new(0u64)), ); - // fund another account so we can send 2 good transactions in a single batch. 
- let keypair = Keypair::new(); - let fund_tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, start_hash); - bank.process_transaction(&fund_tx).unwrap(); - - // good tx, but no verify + // good tx, and no verify let to = solana_pubkey::new_rand(); - let tx_no_ver = system_transaction::transfer(&keypair, &to, 2, start_hash); + let tx_no_ver = system_transaction::transfer(&mint_keypair, &to, 2, start_hash); // good tx let to2 = solana_pubkey::new_rand(); @@ -913,41 +922,44 @@ mod tests { .send(BankingPacketBatch::new(packet_batches)) .unwrap(); + // capture the entry receiver until we've received all our entries. + let mut entries = Vec::with_capacity(100); + loop { + if let Ok((_bank, (entry, _))) = entry_receiver.try_recv() { + let tx_entry = !entry.transactions.is_empty(); + entries.push(entry); + if tx_entry { + break; // once we have the entry break. don't expect more than one. + } + } + sleep(Duration::from_millis(10)); + } + drop(non_vote_sender); drop(tpu_vote_sender); drop(gossip_vote_sender); - // wait until banking_stage to finish up all packets banking_stage.join().unwrap(); exit.store(true, Ordering::Relaxed); poh_service.join().unwrap(); drop(poh_recorder); - let mut blockhash = start_hash; + let blockhash = start_hash; let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - bank.process_transaction(&fund_tx).unwrap(); - //receive entries + ticks - loop { - let entries: Vec = entry_receiver - .iter() - .map(|(_bank, (entry, _tick_height))| entry) - .collect(); - - assert!(entries.verify(&blockhash, &entry::thread_pool_for_tests())); - if !entries.is_empty() { - blockhash = entries.last().unwrap().hash; - for entry in entries { - bank.process_entry_transactions(entry.transactions) - .iter() - .for_each(|x| assert_eq!(*x, Ok(()))); - } - } - if bank.get_balance(&to2) == 1 { - break; - } + // receive entries + ticks. 
The sender has been dropped, so there + // are no more entries that will ever come in after the `iter` here. + entries.extend( + entry_receiver + .iter() + .map(|(_bank, (entry, _tick_height))| entry), + ); - sleep(Duration::from_millis(200)); + assert!(entries.verify(&blockhash, &entry::thread_pool_for_tests())); + for entry in entries { + bank.process_entry_transactions(entry.transactions) + .iter() + .for_each(|x| assert_eq!(*x, Ok(()))); } assert_eq!(bank.get_balance(&to2), 1); @@ -1015,21 +1027,28 @@ mod tests { let entry_receiver = { // start a banking_stage to eat verified receiver let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let (exit, poh_recorder, transaction_recorder, poh_service, entry_receiver) = - create_test_recorder(bank.clone(), blockstore, None, None); - let _banking_stage = BankingStage::new( + let ( + exit, + poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + entry_receiver, + ) = create_test_recorder(bank.clone(), blockstore, None, None); + let _banking_stage = BankingStage::new_num_threads( BlockProductionMethod::CentralScheduler, transaction_struct, - &poh_recorder, + poh_recorder.clone(), transaction_recorder, non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, + DEFAULT_NUM_WORKERS, None, replay_vote_sender, None, bank_forks, - &Arc::new(PrioritizationFeeCache::new(0u64)), + Arc::new(PrioritizationFeeCache::new(0u64)), ); // wait for banking_stage to eat the packets @@ -1199,23 +1218,30 @@ mod tests { Blockstore::open(ledger_path.path()) .expect("Expected to be able to open database ledger"), ); - let (exit, poh_recorder, transaction_recorder, poh_service, _entry_receiver) = - create_test_recorder(bank.clone(), blockstore, None, None); + let ( + exit, + poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _entry_receiver, + ) = create_test_recorder(bank.clone(), blockstore, None, None); let (replay_vote_sender, _replay_vote_receiver) = unbounded(); 
- let banking_stage = BankingStage::new( + let banking_stage = BankingStage::new_num_threads( BlockProductionMethod::CentralScheduler, transaction_struct, - &poh_recorder, + poh_recorder.clone(), transaction_recorder, non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, + DEFAULT_NUM_WORKERS, None, replay_vote_sender, None, bank_forks, - &Arc::new(PrioritizationFeeCache::new(0u64)), + Arc::new(PrioritizationFeeCache::new(0u64)), ); let keypairs = (0..100).map(|_| Keypair::new()).collect_vec(); diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index 6783627e9f98e9..0fba526f8bd64b 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -29,6 +29,7 @@ pub enum CommitTransactionDetails { Committed { compute_units: u64, loaded_accounts_data_size: u32, + fee_payer_post_balance: u64, result: Result<(), TransactionError>, }, NotCommitted(TransactionError), @@ -63,7 +64,7 @@ impl Committer { batch: &TransactionBatch, processing_results: Vec, starting_transaction_index: Option, - bank: &Arc, + bank: &Bank, balance_collector: Option, execute_and_commit_timings: &mut LeaderExecuteAndCommitTimings, processed_counts: &ProcessedTransactionCounts, @@ -88,6 +89,7 @@ impl Committer { .loaded_account_stats .loaded_accounts_data_size, result: committed_tx.status.clone(), + fee_payer_post_balance: committed_tx.fee_payer_post_balance, }, Err(err) => CommitTransactionDetails::NotCommitted(err.clone()), }) @@ -122,7 +124,7 @@ impl Committer { fn collect_balances_and_send_status_batch( &self, commit_results: Vec, - bank: &Arc, + bank: &Bank, batch: &TransactionBatch, balance_collector: Option, starting_transaction_index: Option, diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index 04a39410905c75..3d616e30f68731 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -4,6 +4,7 @@ use { 
leader_slot_timing_metrics::LeaderExecuteAndCommitTimings, scheduler_messages::{ConsumeWork, FinishedConsumeWork}, }, + crate::banking_stage::consumer::RetryableIndex, crossbeam_channel::{Receiver, RecvError, SendError, Sender}, solana_measure::measure_us, solana_poh::poh_recorder::SharedWorkingBank, @@ -30,6 +31,7 @@ pub enum ConsumeWorkerError { } pub(crate) struct ConsumeWorker { + exit: Arc, consume_receiver: Receiver>, consumer: Consumer, consumed_sender: Sender>, @@ -41,12 +43,14 @@ pub(crate) struct ConsumeWorker { impl ConsumeWorker { pub fn new( id: u32, + exit: Arc, consume_receiver: Receiver>, consumer: Consumer, consumed_sender: Sender>, shared_working_bank: SharedWorkingBank, ) -> Self { Self { + exit, consume_receiver, consumer, consumed_sender, @@ -60,10 +64,11 @@ impl ConsumeWorker { } pub fn run(self) -> Result<(), ConsumeWorkerError> { - loop { + while !self.exit.load(Ordering::Relaxed) { let work = self.consume_receiver.recv()?; self.consume_loop(work)?; } + Ok(()) } fn consume_loop(&self, work: ConsumeWork) -> Result<(), ConsumeWorkerError> { @@ -81,6 +86,13 @@ impl ConsumeWorker { .fetch_add(get_bank_us, Ordering::Relaxed); for work in try_drain_iter(work, &self.consume_receiver) { + self.metrics + .count_metrics + .max_queue_len + .fetch_max(self.consume_receiver.len() as u64, Ordering::Relaxed); + if self.exit.load(Ordering::Relaxed) { + return Ok(()); + } if bank.is_complete() || { // if working bank has changed, then try to get a new bank. self.working_bank() @@ -102,6 +114,10 @@ impl ConsumeWorker { return self.retry_drain(work); } } + self.metrics + .count_metrics + .num_messages_processed + .fetch_add(1, Ordering::Relaxed); self.consume(&bank, work)?; } @@ -154,6 +170,9 @@ impl ConsumeWorker { /// Retry current batch and all outstanding batches. 
fn retry_drain(&self, work: ConsumeWork) -> Result<(), ConsumeWorkerError> { for work in try_drain_iter(work, &self.consume_receiver) { + if self.exit.load(Ordering::Relaxed) { + return Ok(()); + } self.retry(work)?; } Ok(()) @@ -161,7 +180,12 @@ impl ConsumeWorker { /// Send transactions back to scheduler as retryable. fn retry(&self, work: ConsumeWork) -> Result<(), ConsumeWorkerError> { - let retryable_indexes: Vec<_> = (0..work.transactions.len()).collect(); + let retryable_indexes: Vec<_> = (0..work.transactions.len()) + .map(|index| RetryableIndex { + index, + immediately_retryable: true, + }) + .collect(); let num_retryable = retryable_indexes.len(); self.metrics .count_metrics @@ -436,6 +460,8 @@ impl ConsumeWorkerMetrics { } struct ConsumeWorkerCountMetrics { + max_queue_len: AtomicU64, + num_messages_processed: AtomicU64, transactions_attempted_processing_count: AtomicU64, processed_transactions_count: AtomicU64, processed_with_successful_result_count: AtomicU64, @@ -449,6 +475,8 @@ struct ConsumeWorkerCountMetrics { impl Default for ConsumeWorkerCountMetrics { fn default() -> Self { Self { + max_queue_len: AtomicU64::default(), + num_messages_processed: AtomicU64::default(), transactions_attempted_processing_count: AtomicU64::default(), processed_transactions_count: AtomicU64::default(), processed_with_successful_result_count: AtomicU64::default(), @@ -466,6 +494,12 @@ impl ConsumeWorkerCountMetrics { datapoint_info!( "banking_stage_worker_counts", "id" => id, + ("max_queue_len", self.max_queue_len.swap(0, Ordering::Relaxed), i64), + ( + "num_messages_processed", + self.num_messages_processed.swap(0, Ordering::Relaxed), + i64 + ), ( "transactions_attempted_processing_count", self.transactions_attempted_processing_count @@ -851,6 +885,7 @@ mod tests { let (consumed_sender, consumed_receiver) = unbounded(); let worker = ConsumeWorker::new( 0, + Arc::new(AtomicBool::new(false)), consume_receiver, consumer, consumed_sender, @@ -913,7 +948,10 @@ mod tests { 
assert_eq!(consumed.work.batch_id, bid); assert_eq!(consumed.work.ids, vec![id]); assert_eq!(consumed.work.max_ages, vec![max_age]); - assert_eq!(consumed.retryable_indexes, vec![0]); + assert_eq!( + consumed.retryable_indexes, + vec![RetryableIndex::new(0, true)] + ); drop(test_frame); let _ = worker_thread.join().unwrap(); @@ -962,7 +1000,7 @@ mod tests { assert_eq!(consumed.work.batch_id, bid); assert_eq!(consumed.work.ids, vec![id]); assert_eq!(consumed.work.max_ages, vec![max_age]); - assert_eq!(consumed.retryable_indexes, Vec::::new()); + assert_eq!(consumed.retryable_indexes, Vec::new()); drop(test_frame); let _ = worker_thread.join().unwrap(); @@ -1022,7 +1060,7 @@ mod tests { if relax_intrabatch_account_locks { vec![] } else { - vec![1] + vec![RetryableIndex::new(1, true)] } ); @@ -1093,13 +1131,13 @@ mod tests { assert_eq!(consumed.work.batch_id, bid1); assert_eq!(consumed.work.ids, vec![id1]); assert_eq!(consumed.work.max_ages, vec![max_age]); - assert_eq!(consumed.retryable_indexes, Vec::::new()); + assert_eq!(consumed.retryable_indexes, Vec::new()); let consumed = consumed_receiver.recv().unwrap(); assert_eq!(consumed.work.batch_id, bid2); assert_eq!(consumed.work.ids, vec![id2]); assert_eq!(consumed.work.max_ages, vec![max_age]); - assert_eq!(consumed.retryable_indexes, Vec::::new()); + assert_eq!(consumed.retryable_indexes, Vec::new()); drop(test_frame); let _ = worker_thread.join().unwrap(); @@ -1229,7 +1267,7 @@ mod tests { .unwrap(); let consumed = consumed_receiver.recv().unwrap(); - assert_eq!(consumed.retryable_indexes, Vec::::new()); + assert_eq!(consumed.retryable_indexes, Vec::new()); // all but one succeed. 
6 for initial funding assert_eq!(bank.transaction_count(), 6 + 5); diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 4b477812466270..060cb3394c13fc 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -28,12 +28,27 @@ use { transaction_processor::{ExecutionRecordingConfig, TransactionProcessingConfig}, }, solana_transaction_error::TransactionError, - std::{num::Saturating, sync::Arc}, + std::num::Saturating, }; /// Consumer will create chunks of transactions from buffer with up to this size. pub const TARGET_NUM_TRANSACTIONS_PER_BATCH: usize = 64; +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct RetryableIndex { + pub index: usize, + pub immediately_retryable: bool, +} + +impl RetryableIndex { + pub fn new(index: usize, immediately_retryable: bool) -> Self { + Self { + index, + immediately_retryable, + } + } +} + pub struct ProcessTransactionBatchOutput { // The number of transactions filtered out by the cost model pub(crate) cost_model_throttled_transactions_count: u64, @@ -48,7 +63,7 @@ pub struct ExecuteAndCommitTransactionsOutput { pub(crate) transaction_counts: LeaderProcessedTransactionCounts, // Transactions that either were not executed, or were executed and failed to be committed due // to the block ending. - pub(crate) retryable_transaction_indexes: Vec, + pub(crate) retryable_transaction_indexes: Vec, // A result that indicates whether transactions were successfully // committed into the Poh stream. 
pub commit_transactions_result: Result, PohRecorderError>, @@ -94,7 +109,7 @@ impl Consumer { pub fn process_and_record_transactions( &self, - bank: &Arc, + bank: &Bank, txs: &[impl TransactionWithMeta], ) -> ProcessTransactionBatchOutput { let mut error_counters = TransactionErrorMetrics::default(); @@ -124,7 +139,7 @@ impl Consumer { pub fn process_and_record_aged_transactions( &self, - bank: &Arc, + bank: &Bank, txs: &[impl TransactionWithMeta], max_ages: &[MaxAge], ) -> ProcessTransactionBatchOutput { @@ -159,7 +174,7 @@ impl Consumer { fn process_and_record_transactions_with_pre_results( &self, - bank: &Arc, + bank: &Bank, txs: &[impl TransactionWithMeta], pre_results: impl Iterator>, ) -> ProcessTransactionBatchOutput { @@ -227,7 +242,7 @@ impl Consumer { fn execute_and_commit_transactions_locked( &self, - bank: &Arc, + bank: &Bank, batch: &TransactionBatch, ) -> ExecuteAndCommitTransactionsOutput { let transaction_status_sender_enabled = self.committer.transaction_status_sender_enabled(); @@ -256,23 +271,39 @@ impl Consumer { // following are retryable errors Err(TransactionError::AccountInUse) => { error_counters.account_in_use += 1; - Some(index) + // locking failure due to vote conflict or jito - immediately retry. 
+ Some(RetryableIndex { + index, + immediately_retryable: true, + }) } Err(TransactionError::WouldExceedMaxBlockCostLimit) => { error_counters.would_exceed_max_block_cost_limit += 1; - Some(index) + Some(RetryableIndex { + index, + immediately_retryable: false, + }) } Err(TransactionError::WouldExceedMaxVoteCostLimit) => { error_counters.would_exceed_max_vote_cost_limit += 1; - Some(index) + Some(RetryableIndex { + index, + immediately_retryable: false, + }) } Err(TransactionError::WouldExceedMaxAccountCostLimit) => { error_counters.would_exceed_max_account_cost_limit += 1; - Some(index) + Some(RetryableIndex { + index, + immediately_retryable: false, + }) } Err(TransactionError::WouldExceedAccountDataBlockLimit) => { error_counters.would_exceed_account_data_block_limit += 1; - Some(index) + Some(RetryableIndex { + index, + immediately_retryable: false, + }) } // following are non-retryable errors Err(TransactionError::TooManyAccountLocks) => { @@ -369,7 +400,12 @@ impl Consumer { if let Err(recorder_err) = record_transactions_result { retryable_transaction_indexes.extend(processing_results.iter().enumerate().filter_map( - |(index, processing_result)| processing_result.was_processed().then_some(index), + |(index, processing_result)| { + processing_result.was_processed().then_some(RetryableIndex { + index, + immediately_retryable: true, // recording errors are always immediately retryable + }) + }, )); // retryable indexes are expected to be sorted - in this case the @@ -532,7 +568,7 @@ mod tests { borrow::Cow, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - RwLock, + Arc, RwLock, }, thread::{Builder, JoinHandle}, time::Duration, @@ -754,7 +790,13 @@ mod tests { processed_with_successful_result_count: 1, } ); - assert_eq!(retryable_transaction_indexes, vec![0]); + assert_eq!( + retryable_transaction_indexes, + vec![RetryableIndex { + index: 0, + immediately_retryable: true + }] + ); assert_matches!( commit_transactions_result, 
Err(PohRecorderError::MaxHeightReached) @@ -843,7 +885,11 @@ mod tests { record.transaction_batches, ); poh_recorder.write().unwrap().tick(); - if record.sender.send(record_response).is_err() { + if record + .sender + .send(record_response.map(|r| r.starting_transaction_index)) + .is_err() + { panic!("Error returning mixin hash"); } } @@ -1148,7 +1194,10 @@ mod tests { commit_transactions_result.get(1), Some(CommitTransactionDetails::NotCommitted(_)) ); - assert_eq!(retryable_transaction_indexes, vec![1]); + assert_eq!( + retryable_transaction_indexes, + vec![RetryableIndex::new(1, true)] + ); let expected_block_cost = { let (actual_programs_execution_cost, actual_loaded_accounts_data_size_cost) = @@ -1157,6 +1206,7 @@ mod tests { compute_units, loaded_accounts_data_size, result: _, + fee_payer_post_balance: _, } => ( *compute_units, CostModel::calculate_loaded_accounts_data_size_cost( @@ -1306,9 +1356,12 @@ mod tests { // with simd3, duplicate transactions are not retryable if relax_intrabatch_account_locks && use_duplicate_transaction { - assert_eq!(retryable_transaction_indexes, Vec::::new()); + assert_eq!(retryable_transaction_indexes, Vec::<_>::new()); } else { - assert_eq!(retryable_transaction_indexes, vec![1]); + assert_eq!( + retryable_transaction_indexes, + vec![RetryableIndex::new(1, true)] + ); } } @@ -1364,7 +1417,9 @@ mod tests { assert_eq!( execute_and_commit_transactions_output.retryable_transaction_indexes, - (1..transactions_len - 1).collect::>() + (1..transactions_len - 1) + .map(|index| RetryableIndex::new(index, true)) + .collect::>() ); } @@ -1450,12 +1505,14 @@ mod tests { if relax_intrabatch_account_locks { assert_eq!( execute_and_commit_transactions_output.retryable_transaction_indexes, - Vec::::new() + Vec::<_>::new() ); } else { assert_eq!( execute_and_commit_transactions_output.retryable_transaction_indexes, - (1..transactions_len).collect::>() + (1..transactions_len) + .map(|index| RetryableIndex::new(index, true)) + .collect::>() ); } 
} @@ -1543,7 +1600,9 @@ mod tests { execute_and_commit_transactions_output .retryable_transaction_indexes .sort_unstable(); - let expected: Vec = (0..transactions.len()).collect(); + let expected: Vec<_> = (0..transactions.len()) + .map(|index| RetryableIndex::new(index, true)) + .collect(); assert_eq!( execute_and_commit_transactions_output.retryable_transaction_indexes, expected @@ -1560,7 +1619,7 @@ mod tests { mut genesis_config, mint_keypair, .. - } = create_slow_genesis_config(solana_native_token::sol_to_lamports(1000.0)); + } = create_slow_genesis_config(solana_native_token::LAMPORTS_PER_SOL * 1000); genesis_config.rent.lamports_per_byte_year = 50; genesis_config.rent.exemption_threshold = 2.0; let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); @@ -1804,6 +1863,7 @@ mod tests { compute_units, loaded_accounts_data_size, result: _, + fee_payer_post_balance: _, } = consumer_output .execute_and_commit_transactions_output .commit_transactions_result diff --git a/core/src/banking_stage/decision_maker.rs b/core/src/banking_stage/decision_maker.rs index 8dcf8b43016f9a..c534ccd5c698ef 100644 --- a/core/src/banking_stage/decision_maker.rs +++ b/core/src/banking_stage/decision_maker.rs @@ -3,118 +3,99 @@ use { DEFAULT_TICKS_PER_SLOT, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, HOLD_TRANSACTIONS_SLOT_OFFSET, }, - solana_poh::poh_recorder::{BankStart, PohRecorder}, + solana_poh::poh_recorder::{ + PohRecorder, SharedLeaderFirstTickHeight, SharedTickHeight, SharedWorkingBank, + }, + solana_runtime::bank::Bank, solana_unified_scheduler_pool::{BankingStageMonitor, BankingStageStatus}, - std::{ - sync::{atomic::{AtomicBool, Ordering::Relaxed}, Arc, RwLock}, - time::{Duration, Instant}, + std::sync::{ + atomic::{AtomicBool, Ordering::Relaxed}, + Arc, }, }; #[derive(Debug, Clone)] pub enum BufferedPacketsDecision { - Consume(BankStart), + Consume(Arc), Forward, ForwardAndHold, Hold, } impl BufferedPacketsDecision { - /// Returns the 
`BankStart` if the decision is `Consume`. Otherwise, returns `None`. - pub fn bank_start(&self) -> Option<&BankStart> { + /// Returns the `Bank` if the decision is `Consume`. Otherwise, returns `None`. + pub fn bank(&self) -> Option<&Arc> { match self { - Self::Consume(bank_start) => Some(bank_start), + Self::Consume(bank) => Some(bank), _ => None, } } } -#[derive(Clone, derive_more::Debug)] +#[derive(Clone)] pub struct DecisionMaker { - #[debug("{poh_recorder:p}")] - poh_recorder: Arc>, + shared_working_bank: SharedWorkingBank, + shared_tick_height: SharedTickHeight, + shared_leader_first_tick_height: SharedLeaderFirstTickHeight, +} - cached_decision: Option, - last_decision_time: Instant, +impl std::fmt::Debug for DecisionMaker { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DecisionMaker") + .field("shared_working_bank", &self.shared_working_bank.load()) + .field("shared_tick_height", &self.shared_tick_height.load()) + .field( + "shared_leader_first_tick_height", + &self.shared_leader_first_tick_height.load(), + ) + .finish() + } } impl DecisionMaker { - pub fn new(poh_recorder: Arc>) -> Self { + pub fn new( + shared_working_bank: SharedWorkingBank, + shared_tick_height: SharedTickHeight, + shared_leader_first_tick_height: SharedLeaderFirstTickHeight, + ) -> Self { Self { - poh_recorder, - cached_decision: None, - last_decision_time: Instant::now(), + shared_working_bank, + shared_tick_height, + shared_leader_first_tick_height, } } - pub(crate) fn make_consume_or_forward_decision(&mut self) -> BufferedPacketsDecision { - const CACHE_DURATION: Duration = Duration::from_millis(5); - let now = Instant::now(); - - // If there is a cached decision that has not expired, return it now. 
- if let Some(decision) = &self.cached_decision { - if now.duration_since(self.last_decision_time) < CACHE_DURATION { - return decision.clone(); + pub(crate) fn make_consume_or_forward_decision(&self) -> BufferedPacketsDecision { + // Check if there is an active working bank. + if let Some(bank) = self.shared_working_bank.load() { + BufferedPacketsDecision::Consume(bank) + } else if let Some(first_leader_tick_height) = self.shared_leader_first_tick_height.load() { + let current_tick_height = self.shared_tick_height.load(); + let ticks_until_leader = first_leader_tick_height.saturating_sub(current_tick_height); + + if ticks_until_leader + <= (FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET - 1) * DEFAULT_TICKS_PER_SLOT + { + BufferedPacketsDecision::Hold + } else if ticks_until_leader < HOLD_TRANSACTIONS_SLOT_OFFSET * DEFAULT_TICKS_PER_SLOT { + BufferedPacketsDecision::ForwardAndHold + } else { + BufferedPacketsDecision::Forward } - } - - self.last_decision_time = now; - self.cached_decision = Some(self.make_consume_or_forward_decision_no_cache()); - self.cached_decision.as_ref().unwrap().clone() - } - - fn make_consume_or_forward_decision_no_cache(&self) -> BufferedPacketsDecision { - let decision; - { - let poh_recorder = self.poh_recorder.read().unwrap(); - decision = Self::consume_or_forward_packets( - || Self::bank_start(&poh_recorder), - || Self::would_be_leader_shortly(&poh_recorder), - || Self::would_be_leader(&poh_recorder), - ); - } - - decision - } - - fn consume_or_forward_packets( - bank_start_fn: impl FnOnce() -> Option, - would_be_leader_shortly_fn: impl FnOnce() -> bool, - would_be_leader_fn: impl FnOnce() -> bool, - ) -> BufferedPacketsDecision { - // If has active bank, then immediately process buffered packets - // otherwise, based on leader schedule to either forward or hold packets - if let Some(bank_start) = bank_start_fn() { - // If the bank is available, this node is the leader - BufferedPacketsDecision::Consume(bank_start) - } else if 
would_be_leader_shortly_fn() { - // If the node will be the leader soon, hold the packets for now - BufferedPacketsDecision::Hold - } else if would_be_leader_fn() { - // Node will be leader within ~20 slots, hold the transactions in - // case it is the only node which produces an accepted slot. - BufferedPacketsDecision::ForwardAndHold } else { - // If the current node is not the leader, forward the buffered packets BufferedPacketsDecision::Forward } } +} - fn bank_start(poh_recorder: &PohRecorder) -> Option { - poh_recorder - .bank_start() - .filter(|bank_start| bank_start.should_working_bank_still_be_processing_txs()) - } - - fn would_be_leader_shortly(poh_recorder: &PohRecorder) -> bool { - poh_recorder.would_be_leader( - (FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET - 1) * DEFAULT_TICKS_PER_SLOT, +impl From<&PohRecorder> for DecisionMaker { + fn from(poh_recorder: &PohRecorder) -> Self { + Self::new( + poh_recorder.shared_working_bank(), + poh_recorder.shared_tick_height(), + poh_recorder.shared_leader_first_tick_height(), ) } - - fn would_be_leader(poh_recorder: &PohRecorder) -> bool { - poh_recorder.would_be_leader(HOLD_TRANSACTIONS_SLOT_OFFSET * DEFAULT_TICKS_PER_SLOT) - } } #[derive(Debug)] @@ -124,10 +105,7 @@ pub(crate) struct DecisionMakerWrapper { } impl DecisionMakerWrapper { - pub(crate) fn new(decision_maker: DecisionMaker) -> Self { - // Clone-off before hand to avoid lock contentions. 
- let is_exited = decision_maker.poh_recorder.read().unwrap().is_exited.clone(); - + pub(crate) fn new(is_exited: Arc, decision_maker: DecisionMaker) -> Self { Self { is_exited, decision_maker, @@ -153,76 +131,53 @@ impl BankingStageMonitor for DecisionMakerWrapper { #[cfg(test)] mod tests { use { - super::*, - core::panic, - solana_clock::NUM_CONSECUTIVE_LEADER_SLOTS, - solana_ledger::{blockstore::Blockstore, genesis_utils::create_genesis_config}, - solana_poh::poh_recorder::create_test_recorder, - solana_pubkey::Pubkey, - solana_runtime::bank::Bank, - std::{ - env::temp_dir, - sync::{atomic::Ordering, Arc}, - time::Instant, - }, + super::*, solana_ledger::genesis_utils::create_genesis_config, solana_runtime::bank::Bank, }; #[test] - fn test_buffered_packet_decision_bank_start() { + fn test_buffered_packet_decision_bank() { let bank = Arc::new(Bank::default_for_tests()); - let bank_start = BankStart { - working_bank: bank, - bank_creation_time: Arc::new(Instant::now()), - }; - assert!(BufferedPacketsDecision::Consume(bank_start) - .bank_start() - .is_some()); - assert!(BufferedPacketsDecision::Forward.bank_start().is_none()); - assert!(BufferedPacketsDecision::ForwardAndHold - .bank_start() - .is_none()); - assert!(BufferedPacketsDecision::Hold.bank_start().is_none()); + assert!(BufferedPacketsDecision::Consume(bank).bank().is_some()); + assert!(BufferedPacketsDecision::Forward.bank().is_none()); + assert!(BufferedPacketsDecision::ForwardAndHold.bank().is_none()); + assert!(BufferedPacketsDecision::Hold.bank().is_none()); } #[test] fn test_make_consume_or_forward_decision() { let genesis_config = create_genesis_config(2).genesis_config; let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let ledger_path = temp_dir(); - let blockstore = Arc::new(Blockstore::open(ledger_path.as_path()).unwrap()); - let (exit, poh_recorder, _transaction_recorder, poh_service, _entry_receiver) = - create_test_recorder(bank.clone(), blockstore, None, 
None); - // Drop the poh service immediately to avoid potential ticking - exit.store(true, Ordering::Relaxed); - poh_service.join().unwrap(); - let my_pubkey = Pubkey::new_unique(); - let decision_maker = DecisionMaker::new(poh_recorder.clone()); - poh_recorder.write().unwrap().reset(bank.clone(), None); - let slot = bank.slot() + 1; - let bank = Arc::new(Bank::new_from_parent(bank, &my_pubkey, slot)); + let mut shared_working_bank = SharedWorkingBank::empty(); + let shared_tick_height = SharedTickHeight::new(0); + let mut shared_leader_first_tick_height = SharedLeaderFirstTickHeight::new(None); - // Currently Leader - Consume - { - poh_recorder - .write() - .unwrap() - .set_bank_for_test(bank.clone()); - let decision = decision_maker.make_consume_or_forward_decision_no_cache(); - assert_matches!(decision, BufferedPacketsDecision::Consume(_)); - } + let decision_maker = DecisionMaker::new( + shared_working_bank.clone(), + shared_tick_height.clone(), + shared_leader_first_tick_height.clone(), + ); + + // No active bank, no leader first tick height. + assert_matches!( + decision_maker.make_consume_or_forward_decision(), + BufferedPacketsDecision::Forward + ); + + // Active bank. 
+ shared_working_bank.store(bank.clone()); + assert_matches!( + decision_maker.make_consume_or_forward_decision(), + BufferedPacketsDecision::Consume(_) + ); + shared_working_bank.clear(); // Will be leader shortly - Hold for next_leader_slot_offset in [0, 1].into_iter() { let next_leader_slot = bank.slot() + next_leader_slot_offset; - poh_recorder.write().unwrap().reset( - bank.clone(), - Some(( - next_leader_slot, - next_leader_slot + NUM_CONSECUTIVE_LEADER_SLOTS, - )), - ); - let decision = decision_maker.make_consume_or_forward_decision_no_cache(); + shared_leader_first_tick_height.store(Some(next_leader_slot * DEFAULT_TICKS_PER_SLOT)); + + let decision = decision_maker.make_consume_or_forward_decision(); assert!( matches!(decision, BufferedPacketsDecision::Hold), "next_leader_slot_offset: {next_leader_slot_offset}", @@ -232,70 +187,22 @@ mod tests { // Will be leader - ForwardAndHold for next_leader_slot_offset in [2, 19].into_iter() { let next_leader_slot = bank.slot() + next_leader_slot_offset; - poh_recorder.write().unwrap().reset( - bank.clone(), - Some(( - next_leader_slot, - next_leader_slot + NUM_CONSECUTIVE_LEADER_SLOTS + 1, - )), - ); - let decision = decision_maker.make_consume_or_forward_decision_no_cache(); + shared_leader_first_tick_height.store(Some(next_leader_slot * DEFAULT_TICKS_PER_SLOT)); + + let decision = decision_maker.make_consume_or_forward_decision(); assert!( matches!(decision, BufferedPacketsDecision::ForwardAndHold), "next_leader_slot_offset: {next_leader_slot_offset}", ); } - // Known leader, not me - Forward - { - poh_recorder.write().unwrap().reset(bank, None); - let decision = decision_maker.make_consume_or_forward_decision_no_cache(); - assert_matches!(decision, BufferedPacketsDecision::Forward); - } - } - - #[test] - fn test_should_process_or_forward_packets() { - let bank = Arc::new(Bank::default_for_tests()); - let bank_start = Some(BankStart { - working_bank: bank, - bank_creation_time: Arc::new(Instant::now()), - }); - // 
having active bank allows to consume immediately - assert_matches!( - DecisionMaker::consume_or_forward_packets( - || bank_start.clone(), - || panic!("should not be called"), - || panic!("should not be called"), - ), - BufferedPacketsDecision::Consume(_) - ); - // Leader other than me, forward the packets - assert_matches!( - DecisionMaker::consume_or_forward_packets( - || None, - || false, - || false, - ), - BufferedPacketsDecision::Forward - ); - // Will be leader shortly, hold the packets - assert_matches!( - DecisionMaker::consume_or_forward_packets( - || None, - || true, - || panic!("should not be called"), - ), - BufferedPacketsDecision::Hold - ); - // Will be leader (not shortly), forward and hold - assert_matches!( - DecisionMaker::consume_or_forward_packets( - || None, - || false, - || true, - ), - BufferedPacketsDecision::ForwardAndHold + // Longer period until next leader - Forward + let next_leader_slot = 20 + bank.slot(); + shared_leader_first_tick_height.store(Some(next_leader_slot * DEFAULT_TICKS_PER_SLOT)); + let decision = decision_maker.make_consume_or_forward_decision(); + assert!( + matches!(decision, BufferedPacketsDecision::Forward), + "next_leader_slot: {next_leader_slot}", ); } } diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index b6db7fcae50071..7e2b5c7c8e87a5 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -4,7 +4,7 @@ use { solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions, solana_hash::Hash, - solana_message::{v0::LoadedAddresses, AddressLoaderError, Message, SimpleAddressLoader}, + solana_message::{v0::LoadedAddresses, Message, SimpleAddressLoader}, solana_perf::packet::PacketRef, solana_pubkey::Pubkey, solana_runtime::bank::Bank, @@ -19,6 +19,7 @@ use { 
sanitized::{MessageHash, SanitizedTransaction}, versioned::{sanitized::SanitizedVersionedTransaction, VersionedTransaction}, }, + solana_transaction_error::AddressLoaderError, std::{cmp::Ordering, collections::HashSet, mem::size_of}, thiserror::Error, }; diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index c9640b03025dc2..32afe4ab77ddcb 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -6,9 +6,9 @@ use { vote_storage::VoteBatchInsertionMetrics, }, solana_clock::Slot, - solana_poh::poh_recorder::BankStart, + solana_runtime::bank::Bank, solana_svm::transaction_error_metrics::*, - std::{num::Saturating, time::Instant}, + std::{num::Saturating, sync::Arc}, }; /// A summary of what happened to transactions passed to the processing pipeline. @@ -397,13 +397,13 @@ pub(crate) struct LeaderSlotMetrics { } impl LeaderSlotMetrics { - pub(crate) fn new(slot: Slot, bank_creation_time: &Instant) -> Self { + pub(crate) fn new(slot: Slot) -> Self { Self { slot, packet_count_metrics: LeaderSlotPacketCountMetrics::new(), transaction_error_metrics: TransactionErrorMetrics::new(), vote_packet_count_metrics: VotePacketCountMetrics::new(), - timing_metrics: LeaderSlotTimingMetrics::new(bank_creation_time), + timing_metrics: LeaderSlotTimingMetrics::new(), is_reported: false, } } @@ -476,9 +476,9 @@ impl LeaderSlotMetricsTracker { // Check leader slot, return MetricsTrackerAction to be applied by apply_action() pub(crate) fn check_leader_slot_boundary( &mut self, - bank_start: Option<&BankStart>, + bank: Option<&Arc>, ) -> MetricsTrackerAction { - match (self.leader_slot_metrics.as_mut(), bank_start) { + match (self.leader_slot_metrics.as_mut(), bank) { (None, None) => MetricsTrackerAction::Noop, (Some(leader_slot_metrics), None) => { @@ -487,20 +487,16 @@ impl LeaderSlotMetricsTracker { } // Our leader slot has begain, time to create a new slot tracker - (None, 
Some(bank_start)) => { - MetricsTrackerAction::NewTracker(Some(LeaderSlotMetrics::new( - bank_start.working_bank.slot(), - &bank_start.bank_creation_time, - ))) + (None, Some(bank)) => { + MetricsTrackerAction::NewTracker(Some(LeaderSlotMetrics::new(bank.slot()))) } - (Some(leader_slot_metrics), Some(bank_start)) => { - if leader_slot_metrics.slot != bank_start.working_bank.slot() { + (Some(leader_slot_metrics), Some(bank)) => { + if leader_slot_metrics.slot != bank.slot() { // Last slot has ended, new slot has began leader_slot_metrics.mark_slot_end_detected(); MetricsTrackerAction::ReportAndNewTracker(Some(LeaderSlotMetrics::new( - bank_start.working_bank.slot(), - &bank_start.bank_creation_time, + bank.slot(), ))) } else { MetricsTrackerAction::Noop @@ -810,19 +806,13 @@ mod tests { struct TestSlotBoundaryComponents { first_bank: Arc, - first_poh_recorder_bank: BankStart, next_bank: Arc, - next_poh_recorder_bank: BankStart, leader_slot_metrics_tracker: LeaderSlotMetricsTracker, } fn setup_test_slot_boundary_banks() -> TestSlotBoundaryComponents { let genesis = create_genesis_config(10); let first_bank = Arc::new(Bank::new_for_tests(&genesis.genesis_config)); - let first_poh_recorder_bank = BankStart { - working_bank: first_bank.clone(), - bank_creation_time: Arc::new(Instant::now()), - }; // Create a child descended from the first bank let next_bank = Arc::new(Bank::new_from_parent( @@ -830,18 +820,12 @@ mod tests { &Pubkey::new_unique(), first_bank.slot() + 1, )); - let next_poh_recorder_bank = BankStart { - working_bank: next_bank.clone(), - bank_creation_time: Arc::new(Instant::now()), - }; let leader_slot_metrics_tracker = LeaderSlotMetricsTracker::default(); TestSlotBoundaryComponents { first_bank, - first_poh_recorder_bank, next_bank, - next_poh_recorder_bank, leader_slot_metrics_tracker, } } @@ -865,7 +849,7 @@ mod tests { #[test] pub fn test_update_on_leader_slot_boundary_not_leader_to_leader() { let TestSlotBoundaryComponents { - 
first_poh_recorder_bank, + first_bank, mut leader_slot_metrics_tracker, .. } = setup_test_slot_boundary_banks(); @@ -873,8 +857,7 @@ mod tests { // Test case where the thread has not detected a leader bank, and now sees a leader bank. // Metrics should not be reported because leader slot has not ended assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none()); - let action = - leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&first_bank)); assert_eq!( mem::discriminant(&MetricsTrackerAction::NewTracker(None)), mem::discriminant(&action) @@ -887,7 +870,6 @@ mod tests { pub fn test_update_on_leader_slot_boundary_leader_to_not_leader() { let TestSlotBoundaryComponents { first_bank, - first_poh_recorder_bank, mut leader_slot_metrics_tracker, .. } = setup_test_slot_boundary_banks(); @@ -897,8 +879,7 @@ mod tests { // because that leader slot has just ended. { // Setup first_bank - let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&first_bank)); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { @@ -928,7 +909,6 @@ mod tests { pub fn test_update_on_leader_slot_boundary_leader_to_leader_same_slot() { let TestSlotBoundaryComponents { first_bank, - first_poh_recorder_bank, mut leader_slot_metrics_tracker, .. } = setup_test_slot_boundary_banks(); @@ -937,14 +917,12 @@ mod tests { // implying the slot is still running. 
Metrics should not be reported { // Setup with first_bank - let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&first_bank)); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert nop-op if same bank - let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&first_bank)); assert_eq!( mem::discriminant(&MetricsTrackerAction::Noop), mem::discriminant(&action) @@ -970,9 +948,7 @@ mod tests { pub fn test_update_on_leader_slot_boundary_leader_to_leader_bigger_slot() { let TestSlotBoundaryComponents { first_bank, - first_poh_recorder_bank, next_bank, - next_poh_recorder_bank, mut leader_slot_metrics_tracker, } = setup_test_slot_boundary_banks(); @@ -981,14 +957,12 @@ mod tests { // smaller slot { // Setup with first_bank - let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&first_bank)); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert reporting if new bank - let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&next_poh_recorder_bank)); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&next_bank)); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndNewTracker(None)), mem::discriminant(&action) @@ -1018,9 +992,7 @@ mod tests { pub fn test_update_on_leader_slot_boundary_leader_to_leader_smaller_slot() { let TestSlotBoundaryComponents { first_bank, - first_poh_recorder_bank, next_bank, - next_poh_recorder_bank, mut leader_slot_metrics_tracker, } = setup_test_slot_boundary_banks(); // Test case where the thread has a leader bank, and now detects there's a new leader bank @@ 
-1028,14 +1000,12 @@ mod tests { // bigger slot { // Setup with next_bank - let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&next_poh_recorder_bank)); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&next_bank)); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert reporting if new bank - let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&first_bank)); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndNewTracker(None)), mem::discriminant(&action) diff --git a/core/src/banking_stage/leader_slot_timing_metrics.rs b/core/src/banking_stage/leader_slot_timing_metrics.rs index 3b03c82dbb77c9..ebbe52ba4cb023 100644 --- a/core/src/banking_stage/leader_slot_timing_metrics.rs +++ b/core/src/banking_stage/leader_slot_timing_metrics.rs @@ -1,6 +1,6 @@ use { solana_clock::Slot, solana_poh::transaction_recorder::RecordTransactionsTimings, - solana_timings::ExecuteTimings, std::time::Instant, + solana_svm_timings::ExecuteTimings, std::time::Instant, }; #[derive(Default, Debug)] @@ -77,9 +77,9 @@ pub(crate) struct LeaderSlotTimingMetrics { } impl LeaderSlotTimingMetrics { - pub(crate) fn new(bank_creation_time: &Instant) -> Self { + pub(crate) fn new() -> Self { Self { - outer_loop_timings: OuterLoopTimings::new(bank_creation_time), + outer_loop_timings: OuterLoopTimings::new(), process_buffered_packets_timings: ProcessBufferedPacketsTimings::default(), consume_buffered_packets_timings: ConsumeBufferedPacketsTimings::default(), process_packets_timings: ProcessPacketsTimings::default(), @@ -104,9 +104,6 @@ impl LeaderSlotTimingMetrics { pub(crate) struct OuterLoopTimings { pub bank_detected_time: Instant, - // Delay from when the bank was created to when this thread detected it - pub bank_detected_delay_us: u64, - // Time spent processing buffered packets pub 
process_buffered_packets_us: u64, @@ -122,10 +119,9 @@ pub(crate) struct OuterLoopTimings { } impl OuterLoopTimings { - fn new(bank_creation_time: &Instant) -> Self { + fn new() -> Self { Self { bank_detected_time: Instant::now(), - bank_detected_delay_us: bank_creation_time.elapsed().as_micros() as u64, process_buffered_packets_us: 0, receive_and_buffer_packets_us: 0, receive_and_buffer_packets_invoked_count: 0, @@ -148,12 +144,6 @@ impl OuterLoopTimings { self.bank_detected_to_slot_end_detected_us, i64 ), - ( - "bank_creation_to_slot_end_detected_us", - self.bank_detected_to_slot_end_detected_us + self.bank_detected_delay_us, - i64 - ), - ("bank_detected_delay_us", self.bank_detected_delay_us, i64), ( "process_buffered_packets_us", self.process_buffered_packets_us, diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index 0c947aeae0d103..b75567f69ba165 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -198,6 +198,7 @@ impl QosService { compute_units, loaded_accounts_data_size, result: _, + fee_payer_post_balance: _, } => { cost_tracker.update_execution_cost( tx_cost, @@ -752,6 +753,7 @@ mod tests { loaded_accounts_data_size: loaded_accounts_data_size + loaded_accounts_data_size_adjustment, result: Ok(()), + fee_payer_post_balance: 0, }) .collect(); let final_txs_cost = total_txs_cost @@ -883,6 +885,7 @@ mod tests { loaded_accounts_data_size: loaded_accounts_data_size + loaded_accounts_data_size_adjustment, result: Ok(()), + fee_payer_post_balance: 1, } } }) diff --git a/core/src/banking_stage/scheduler_messages.rs b/core/src/banking_stage/scheduler_messages.rs index 7905278323152e..37fdf92d2b3efa 100644 --- a/core/src/banking_stage/scheduler_messages.rs +++ b/core/src/banking_stage/scheduler_messages.rs @@ -1,4 +1,5 @@ use { + crate::banking_stage::consumer::RetryableIndex, solana_clock::{Epoch, Slot}, std::fmt::Display, }; @@ -47,5 +48,5 @@ pub struct ConsumeWork { /// 
Processed transactions. pub struct FinishedConsumeWork { pub work: ConsumeWork, - pub retryable_indexes: Vec, + pub retryable_indexes: Vec, } diff --git a/core/src/banking_stage/transaction_scheduler/receive_and_buffer.rs b/core/src/banking_stage/transaction_scheduler/receive_and_buffer.rs index 39dbac2da05543..a60702b34af3df 100644 --- a/core/src/banking_stage/transaction_scheduler/receive_and_buffer.rs +++ b/core/src/banking_stage/transaction_scheduler/receive_and_buffer.rs @@ -2,7 +2,6 @@ use qualifier_attr::qualifiers; use { super::{ - scheduler_metrics::{SchedulerCountMetrics, SchedulerTimingMetrics}, transaction_priority_id::TransactionPriorityId, transaction_state::TransactionState, transaction_state_container::{ @@ -38,8 +37,8 @@ use { solana_svm::transaction_error_metrics::TransactionErrorMetrics, solana_svm_transaction::svm_message::SVMMessage, solana_transaction::sanitized::{MessageHash, SanitizedTransaction}, + solana_transaction_error::TransactionError, std::{ - num::Saturating, sync::{Arc, RwLock}, time::Instant, }, @@ -49,6 +48,48 @@ use { #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) struct DisconnectedError; +/// Stats/metrics returned by `receive_and_buffer_packets`. +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] +pub(crate) struct ReceivingStats { + pub num_received: usize, + /// Count of packets that passed sigverify but were dropped + /// without further checks because we were outside the holding + /// window. 
+ pub num_dropped_without_parsing: usize, + + pub num_dropped_on_parsing_and_sanitization: usize, + pub num_dropped_on_lock_validation: usize, + pub num_dropped_on_compute_budget: usize, + pub num_dropped_on_age: usize, + pub num_dropped_on_already_processed: usize, + pub num_dropped_on_fee_payer: usize, + pub num_dropped_on_capacity: usize, + + pub num_buffered: usize, + + pub receive_time_us: u64, + pub buffer_time_us: u64, +} + +impl ReceivingStats { + fn accumulate(&mut self, other: ReceivingStats) { + self.num_received += other.num_received; + self.num_dropped_without_parsing += other.num_dropped_without_parsing; + self.num_dropped_on_parsing_and_sanitization += + other.num_dropped_on_parsing_and_sanitization; + self.num_dropped_on_lock_validation += other.num_dropped_on_lock_validation; + self.num_dropped_on_compute_budget += other.num_dropped_on_compute_budget; + self.num_dropped_on_age += other.num_dropped_on_age; + self.num_dropped_on_already_processed += other.num_dropped_on_already_processed; + self.num_dropped_on_fee_payer += other.num_dropped_on_fee_payer; + self.num_dropped_on_capacity += other.num_dropped_on_capacity; + self.num_buffered += other.num_buffered; + + self.receive_time_us += other.receive_time_us; + self.buffer_time_us += other.buffer_time_us; + } +} + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) trait ReceiveAndBuffer { type Transaction: TransactionWithMeta + Send + Sync; @@ -59,10 +100,8 @@ pub(crate) trait ReceiveAndBuffer { fn receive_and_buffer_packets( &mut self, container: &mut Self::Container, - timing_metrics: &mut SchedulerTimingMetrics, - count_metrics: &mut SchedulerCountMetrics, decision: &BufferedPacketsDecision, - ) -> Result; + ) -> Result; } #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] @@ -80,10 +119,8 @@ impl ReceiveAndBuffer for SanitizedTransactionReceiveAndBuffer { fn receive_and_buffer_packets( &mut self, container: &mut Self::Container, - timing_metrics: &mut 
SchedulerTimingMetrics, - count_metrics: &mut SchedulerCountMetrics, decision: &BufferedPacketsDecision, - ) -> Result { + ) -> Result { const MAX_RECEIVE_PACKETS: usize = 5_000; const MAX_PACKET_RECEIVE_TIME: Duration = Duration::from_millis(10); let (recv_timeout, should_buffer) = match decision { @@ -103,43 +140,80 @@ impl ReceiveAndBuffer for SanitizedTransactionReceiveAndBuffer { .packet_receiver .receive_packets(recv_timeout, MAX_RECEIVE_PACKETS)); - timing_metrics.update(|timing_metrics| { - timing_metrics.receive_time_us += receive_time_us; - }); - - let num_received = match received_packet_results { + match received_packet_results { Ok(receive_packet_results) => { - let num_received_packets = receive_packet_results.deserialized_packets.len(); - - count_metrics.update(|count_metrics| { - count_metrics.num_received += num_received_packets; - }); - + let num_received = + receive_packet_results.packet_stats.passed_sigverify_count.0 as usize; if should_buffer { - let (_, buffer_time_us) = measure_us!(self.buffer_packets( - container, - timing_metrics, - count_metrics, - receive_packet_results.deserialized_packets - )); - timing_metrics.update(|timing_metrics| { - timing_metrics.buffer_time_us += buffer_time_us; - }); + let num_dropped_on_initial_parsing = + num_received - receive_packet_results.deserialized_packets.len(); + + let (buffer_stats, buffer_time_us) = measure_us!( + self.buffer_packets(container, receive_packet_results.deserialized_packets) + ); + Ok(ReceivingStats { + num_received, + num_dropped_without_parsing: 0, + num_dropped_on_parsing_and_sanitization: num_dropped_on_initial_parsing + + buffer_stats.num_dropped_on_sanitization, + num_dropped_on_lock_validation: buffer_stats.num_dropped_on_lock_validation, + num_dropped_on_compute_budget: buffer_stats.num_dropped_on_compute_budget, + num_dropped_on_age: buffer_stats.num_dropped_on_age, + num_dropped_on_already_processed: buffer_stats + .num_dropped_on_already_processed, + 
num_dropped_on_fee_payer: buffer_stats.num_dropped_on_fee_payer, + num_dropped_on_capacity: buffer_stats.num_dropped_on_capacity, + num_buffered: buffer_stats.num_buffered, + receive_time_us, + buffer_time_us, + }) } else { - count_metrics.update(|count_metrics| { - count_metrics.num_dropped_on_receive += num_received_packets; - }); + Ok(ReceivingStats { + num_received, + num_dropped_without_parsing: num_received, + num_dropped_on_parsing_and_sanitization: 0, + num_dropped_on_lock_validation: 0, + num_dropped_on_compute_budget: 0, + num_dropped_on_age: 0, + num_dropped_on_already_processed: 0, + num_dropped_on_fee_payer: 0, + num_dropped_on_capacity: 0, + num_buffered: 0, + receive_time_us, + buffer_time_us: 0, + }) } - num_received_packets } - Err(RecvTimeoutError::Timeout) => 0, - Err(RecvTimeoutError::Disconnected) => return Err(DisconnectedError), - }; - - Ok(num_received) + Err(RecvTimeoutError::Timeout) => Ok(ReceivingStats { + num_received: 0, + num_dropped_without_parsing: 0, + num_dropped_on_parsing_and_sanitization: 0, + num_dropped_on_lock_validation: 0, + num_dropped_on_compute_budget: 0, + num_dropped_on_age: 0, + num_dropped_on_already_processed: 0, + num_dropped_on_fee_payer: 0, + num_dropped_on_capacity: 0, + num_buffered: 0, + receive_time_us, + buffer_time_us: 0, + }), + Err(RecvTimeoutError::Disconnected) => Err(DisconnectedError), + } } } +struct BufferStats { + num_dropped_on_sanitization: usize, + num_dropped_on_lock_validation: usize, + num_dropped_on_compute_budget: usize, + num_dropped_on_age: usize, + num_dropped_on_already_processed: usize, + num_dropped_on_fee_payer: usize, + num_dropped_on_capacity: usize, + num_buffered: usize, +} + impl SanitizedTransactionReceiveAndBuffer { pub fn new(packet_receiver: PacketDeserializer, bank_forks: Arc>) -> Self { Self { @@ -151,10 +225,8 @@ impl SanitizedTransactionReceiveAndBuffer { fn buffer_packets( &mut self, container: &mut TransactionStateContainer>, - _timing_metrics: &mut 
SchedulerTimingMetrics, - count_metrics: &mut SchedulerCountMetrics, packets: Vec, - ) { + ) -> BufferStats { // Convert to Arcs let packets: Vec<_> = packets.into_iter().map(Arc::new).collect(); // Sanitize packets, generate IDs, and insert into the container. @@ -176,41 +248,54 @@ impl SanitizedTransactionReceiveAndBuffer { let mut max_ages = ArrayVec::<_, CHUNK_SIZE>::new(); let mut fee_budget_limits_vec = ArrayVec::<_, CHUNK_SIZE>::new(); + let mut num_dropped_on_sanitization = 0; + let mut num_dropped_on_lock_validation = 0; + let mut num_dropped_on_compute_budget = 0; + let mut num_dropped_on_age = 0; + let mut num_dropped_on_already_processed = 0; + let mut num_dropped_on_fee_payer = 0; + let mut num_dropped_on_capacity = 0; + let mut num_buffered = 0; + let mut error_counts = TransactionErrorMetrics::default(); for chunk in packets.chunks(CHUNK_SIZE) { - let mut post_sanitization_count = Saturating::(0); - chunk - .iter() - .filter_map(|packet| { - packet.build_sanitized_transaction( - vote_only, - root_bank.as_ref(), - root_bank.get_reserved_account_keys(), - ) - }) - .inspect(|_| post_sanitization_count += 1) - .filter(|(tx, _deactivation_slot)| { - validate_account_locks( - tx.message().account_keys(), - transaction_account_lock_limit, - ) - .is_ok() - }) - .filter_map(|(tx, deactivation_slot)| { - tx.compute_budget_instruction_details() - .sanitize_and_convert_to_compute_budget_limits(&working_bank.feature_set) - .map(|compute_budget| (tx, deactivation_slot, compute_budget.into())) - .ok() - }) - .for_each(|(tx, deactivation_slot, fee_budget_limits)| { - transactions.push(tx); - max_ages.push(calculate_max_age( - sanitized_epoch, - deactivation_slot, - alt_resolved_slot, - )); - fee_budget_limits_vec.push(fee_budget_limits); - }); + for packet in chunk { + let Some((tx, deactivation_slot)) = packet.build_sanitized_transaction( + vote_only, + root_bank.as_ref(), + root_bank.get_reserved_account_keys(), + ) else { + num_dropped_on_sanitization += 1; + 
continue; + }; + + if validate_account_locks( + tx.message().account_keys(), + transaction_account_lock_limit, + ) + .is_err() + { + num_dropped_on_lock_validation += 1; + continue; + } + + let Ok(fee_budget_limits) = tx + .compute_budget_instruction_details() + .sanitize_and_convert_to_compute_budget_limits(&working_bank.feature_set) + .map(|compute_budget| compute_budget.into()) + else { + num_dropped_on_compute_budget += 1; + continue; + }; + + transactions.push(tx); + max_ages.push(calculate_max_age( + sanitized_epoch, + deactivation_slot, + alt_resolved_slot, + )); + fee_budget_limits_vec.push(fee_budget_limits); + } let check_results = working_bank.check_transactions( &transactions, @@ -218,52 +303,58 @@ impl SanitizedTransactionReceiveAndBuffer { MAX_PROCESSING_AGE, &mut error_counts, ); - let post_lock_validation_count = transactions.len(); - let mut post_transaction_check_count = Saturating::(0); - let mut num_dropped_on_capacity = Saturating::(0); - let mut num_buffered = Saturating::(0); - for (((transaction, max_age), fee_budget_limits), _check_result) in transactions + for (((transaction, max_age), fee_budget_limits), check_result) in transactions .drain(..) 
.zip(max_ages.drain(..)) .zip(fee_budget_limits_vec.drain(..)) .zip(check_results) - .filter(|(_, check_result)| check_result.is_ok()) - .filter(|(((tx, _), _), _)| { - Consumer::check_fee_payer_unlocked(&working_bank, tx, &mut error_counts).is_ok() - }) { - post_transaction_check_count += 1; + match check_result { + Ok(_) => {} + Err(err) => { + match err { + TransactionError::BlockhashNotFound => { + num_dropped_on_age += 1; + } + TransactionError::AlreadyProcessed => { + num_dropped_on_already_processed += 1; + } + _ => {} + } + continue; + } + } + + if Consumer::check_fee_payer_unlocked( + &working_bank, + &transaction, + &mut error_counts, + ) + .is_err() + { + num_dropped_on_fee_payer += 1; + continue; + } let (priority, cost) = calculate_priority_and_cost(&transaction, &fee_budget_limits, &working_bank); - + num_buffered += 1; if container.insert_new_transaction(transaction, max_age, priority, cost) { num_dropped_on_capacity += 1; } - num_buffered += 1; } + } - let Saturating(post_sanitization_count) = post_sanitization_count; - let Saturating(post_transaction_check_count) = post_transaction_check_count; - let Saturating(num_dropped_on_capacity) = num_dropped_on_capacity; - let Saturating(num_buffered) = num_buffered; - - // Update metrics for transactions that were dropped. 
- let num_dropped_on_sanitization = chunk.len().saturating_sub(post_sanitization_count); - let num_dropped_on_lock_validation = - post_sanitization_count.saturating_sub(post_lock_validation_count); - let num_dropped_on_transaction_checks = - post_lock_validation_count.saturating_sub(post_transaction_check_count); - - count_metrics.update(|count_metrics| { - count_metrics.num_dropped_on_capacity += num_dropped_on_capacity; - count_metrics.num_buffered += num_buffered; - count_metrics.num_dropped_on_sanitization += num_dropped_on_sanitization; - count_metrics.num_dropped_on_validate_locks += num_dropped_on_lock_validation; - count_metrics.num_dropped_on_receive_transaction_checks += - num_dropped_on_transaction_checks; - }); + BufferStats { + num_dropped_on_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, } } } @@ -281,10 +372,8 @@ impl ReceiveAndBuffer for TransactionViewReceiveAndBuffer { fn receive_and_buffer_packets( &mut self, container: &mut Self::Container, - timing_metrics: &mut SchedulerTimingMetrics, - count_metrics: &mut SchedulerCountMetrics, decision: &BufferedPacketsDecision, - ) -> Result { + ) -> Result { let (root_bank, working_bank) = { let bank_forks = self.bank_forks.read().unwrap(); let root_bank = bank_forks.root_bank(); @@ -296,12 +385,27 @@ impl ReceiveAndBuffer for TransactionViewReceiveAndBuffer { const TIMEOUT: Duration = Duration::from_millis(10); const PACKET_BURST_LIMIT: usize = 1000; let start = Instant::now(); - let mut num_received = 0; + let mut received_message = false; + let mut stats = ReceivingStats { + num_received: 0, + num_dropped_without_parsing: 0, + num_dropped_on_parsing_and_sanitization: 0, + num_dropped_on_lock_validation: 0, + num_dropped_on_compute_budget: 0, + num_dropped_on_age: 0, + num_dropped_on_already_processed: 0, + num_dropped_on_fee_payer: 0, + 
num_dropped_on_capacity: 0, + num_buffered: 0, + receive_time_us: 0, + buffer_time_us: 0, + }; // If not leader/unknown, do a blocking-receive initially. This lets // the thread sleep until a message is received, or until the timeout. // Additionally, only sleep if the container is empty. + let mut timed_out = false; if container.is_empty() && matches!( decision, @@ -315,85 +419,101 @@ impl ReceiveAndBuffer for TransactionViewReceiveAndBuffer { match self.receiver.recv_timeout(TIMEOUT) { Ok(packet_batch_message) => { received_message = true; - num_received += self.handle_packet_batch_message( + stats.accumulate(self.handle_packet_batch_message( container, - timing_metrics, - count_metrics, decision, &root_bank, &working_bank, packet_batch_message, - ); + )); } - Err(RecvTimeoutError::Timeout) => return Ok(num_received), + Err(RecvTimeoutError::Timeout) => timed_out = true, Err(RecvTimeoutError::Disconnected) => { - return received_message - .then_some(num_received) - .ok_or(DisconnectedError); + if !received_message { + return Err(DisconnectedError); + } } } } - while start.elapsed() < TIMEOUT && num_received < PACKET_BURST_LIMIT { - match self.receiver.try_recv() { - Ok(packet_batch_message) => { - received_message = true; - num_received += self.handle_packet_batch_message( - container, - timing_metrics, - count_metrics, - decision, - &root_bank, - &working_bank, - packet_batch_message, - ); - } - Err(TryRecvError::Empty) => return Ok(num_received), - Err(TryRecvError::Disconnected) => { - return received_message - .then_some(num_received) - .ok_or(DisconnectedError); + if !timed_out { + while start.elapsed() < TIMEOUT && stats.num_received < PACKET_BURST_LIMIT { + match self.receiver.try_recv() { + Ok(packet_batch_message) => { + stats.receive_time_us += start.elapsed().as_micros() as u64; + received_message = true; + let batch_stats = self.handle_packet_batch_message( + container, + decision, + &root_bank, + &working_bank, + packet_batch_message, + ); + 
stats.accumulate(batch_stats); + } + Err(TryRecvError::Empty) => { + break; + } + Err(TryRecvError::Disconnected) => { + if !received_message { + return Err(DisconnectedError); + } + } } } } - Ok(num_received) + Ok(ReceivingStats { + num_received: stats.num_received, + num_dropped_without_parsing: stats.num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization: stats.num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation: stats.num_dropped_on_lock_validation, + num_dropped_on_compute_budget: stats.num_dropped_on_compute_budget, + num_dropped_on_age: stats.num_dropped_on_age, + num_dropped_on_already_processed: stats.num_dropped_on_already_processed, + num_dropped_on_fee_payer: stats.num_dropped_on_fee_payer, + num_dropped_on_capacity: stats.num_dropped_on_capacity, + num_buffered: stats.num_buffered, + receive_time_us: stats.receive_time_us, + buffer_time_us: stats.buffer_time_us, + }) } } +enum PacketHandlingError { + Sanitization, + LockValidation, + ComputeBudget, +} + impl TransactionViewReceiveAndBuffer { /// Return number of received packets. fn handle_packet_batch_message( &mut self, container: &mut TransactionViewStateContainer, - timing_metrics: &mut SchedulerTimingMetrics, - count_metrics: &mut SchedulerCountMetrics, decision: &BufferedPacketsDecision, root_bank: &Bank, working_bank: &Bank, packet_batch_message: BankingPacketBatch, - ) -> usize { - // If not holding packets, just drop them immediately without parsing. - if matches!(decision, BufferedPacketsDecision::Forward) { - return 0; - } - + ) -> ReceivingStats { let start = Instant::now(); + // If outside holding window, do not parse. + let should_parse = !matches!(decision, BufferedPacketsDecision::Forward); + // Sanitize packets, generate IDs, and insert into the container. 
let alt_resolved_slot = root_bank.slot(); let sanitized_epoch = root_bank.epoch(); let transaction_account_lock_limit = working_bank.get_transaction_account_lock_limit(); - let mut num_received = 0usize; - let mut num_buffered = 0usize; - let mut num_dropped_on_status_age_checks = 0usize; - let mut num_dropped_on_capacity = 0usize; - let mut num_dropped_on_receive = 0usize; - // Create temporary batches of transactions to be age-checked. let mut transaction_priority_ids = ArrayVec::<_, EXTRA_CAPACITY>::new(); let lock_results: [_; EXTRA_CAPACITY] = core::array::from_fn(|_| Ok(())); let mut error_counters = TransactionErrorMetrics::default(); + let mut num_dropped_on_age = 0; + let mut num_dropped_on_already_processed = 0; + let mut num_dropped_on_fee_payer = 0; + let mut num_dropped_on_capacity = 0; + let mut num_buffered = 0; let mut check_and_push_to_queue = |container: &mut TransactionViewStateContainer, @@ -420,8 +540,16 @@ impl TransactionViewReceiveAndBuffer { .iter_mut() .zip(transaction_priority_ids.iter()) { - if result.is_err() { - num_dropped_on_status_age_checks += 1; + if let Err(err) = result { + match err { + TransactionError::BlockhashNotFound => { + num_dropped_on_age += 1; + } + TransactionError::AlreadyProcessed => { + num_dropped_on_already_processed += 1; + } + _ => {} + } container.remove_by_id(priority_id.id); continue; } @@ -434,10 +562,12 @@ impl TransactionViewReceiveAndBuffer { &mut error_counters, ) { *result = Err(err); - num_dropped_on_status_age_checks += 1; + num_dropped_on_fee_payer += 1; container.remove_by_id(priority_id.id); continue; } + + num_buffered += 1; } // Push non-errored transaction into queue. 
num_dropped_on_capacity += container.push_ids_into_queue( @@ -449,6 +579,12 @@ impl TransactionViewReceiveAndBuffer { ); }; + let mut num_received = 0; + let mut num_dropped_without_parsing = 0; + let mut num_dropped_on_parsing_and_sanitization = 0; + let mut num_dropped_on_lock_validation = 0; + let mut num_dropped_on_compute_budget = 0; + for packet_batch in packet_batch_message.iter() { for packet in packet_batch.iter() { let Some(packet_data) = packet.data(..) else { @@ -456,6 +592,10 @@ impl TransactionViewReceiveAndBuffer { }; num_received += 1; + if !should_parse { + num_dropped_without_parsing += 1; + continue; + } // Reserve free-space to copy packet into, run sanitization checks, and insert. if let Some(transaction_id) = @@ -468,12 +608,17 @@ impl TransactionViewReceiveAndBuffer { sanitized_epoch, transaction_account_lock_limit, ) { - Ok(state) => { - num_buffered += 1; - Ok(state) + Ok(state) => Ok(state), + Err(PacketHandlingError::Sanitization) => { + num_dropped_on_parsing_and_sanitization += 1; + Err(()) } - Err(()) => { - num_dropped_on_receive += 1; + Err(PacketHandlingError::LockValidation) => { + num_dropped_on_lock_validation += 1; + Err(()) + } + Err(PacketHandlingError::ComputeBudget) => { + num_dropped_on_compute_budget += 1; Err(()) } } @@ -497,19 +642,20 @@ impl TransactionViewReceiveAndBuffer { // Any remaining packets undergo status/age checks check_and_push_to_queue(container, &mut transaction_priority_ids); - let buffer_time_us = start.elapsed().as_micros() as u64; - timing_metrics.update(|timing_metrics| { - timing_metrics.buffer_time_us += buffer_time_us; - }); - count_metrics.update(|count_metrics| { - count_metrics.num_received += num_received; - count_metrics.num_buffered += num_buffered; - count_metrics.num_dropped_on_age_and_status += num_dropped_on_status_age_checks; - count_metrics.num_dropped_on_capacity += num_dropped_on_capacity; - count_metrics.num_dropped_on_receive += num_dropped_on_receive; - }); - - num_received + 
ReceivingStats { + num_received, + num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: 0, // receive is outside this function + buffer_time_us: start.elapsed().as_micros() as u64, + } } fn try_handle_packet( @@ -519,10 +665,10 @@ impl TransactionViewReceiveAndBuffer { alt_resolved_slot: Slot, sanitized_epoch: Epoch, transaction_account_lock_limit: usize, - ) -> Result { + ) -> Result { // Parsing and basic sanitization checks let Ok(view) = SanitizedTransactionView::try_new_sanitized(bytes) else { - return Err(()); + return Err(PacketHandlingError::Sanitization); }; let Ok(view) = RuntimeTransaction::>::try_from( @@ -530,12 +676,12 @@ impl TransactionViewReceiveAndBuffer { MessageHash::Compute, None, ) else { - return Err(()); + return Err(PacketHandlingError::Sanitization); }; // Discard non-vote packets if in vote-only mode. if root_bank.vote_only_bank() && !view.is_simple_vote_transaction() { - return Err(()); + return Err(PacketHandlingError::Sanitization); } // Load addresses for transaction. 
@@ -548,7 +694,7 @@ impl TransactionViewReceiveAndBuffer { }), }; let Ok((loaded_addresses, deactivation_slot)) = load_addresses_result else { - return Err(()); + return Err(PacketHandlingError::Sanitization); }; let Ok(view) = RuntimeTransaction::>::try_from( @@ -556,18 +702,18 @@ impl TransactionViewReceiveAndBuffer { loaded_addresses, root_bank.get_reserved_account_keys(), ) else { - return Err(()); + return Err(PacketHandlingError::Sanitization); }; if validate_account_locks(view.account_keys(), transaction_account_lock_limit).is_err() { - return Err(()); + return Err(PacketHandlingError::LockValidation); } let Ok(compute_budget_limits) = view .compute_budget_instruction_details() .sanitize_and_convert_to_compute_budget_limits(&working_bank.feature_set) else { - return Err(()); + return Err(PacketHandlingError::ComputeBudget); }; let max_age = calculate_max_age(sanitized_epoch, deactivation_slot, alt_resolved_slot); @@ -765,34 +911,25 @@ mod tests { let (bank_forks, _mint_keypair) = test_bank_forks(); let (mut receive_and_buffer, mut container) = setup_receive_and_buffer(receiver, bank_forks); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let mut count_metrics = SchedulerCountMetrics::default(); drop(sender); // disconnect channel - let r = receive_and_buffer.receive_and_buffer_packets( - &mut container, - &mut timing_metrics, - &mut count_metrics, - &BufferedPacketsDecision::Hold, - ); + let r = receive_and_buffer + .receive_and_buffer_packets(&mut container, &BufferedPacketsDecision::Hold); assert!(r.is_err()); } - #[test_case(setup_sanitized_transaction_receive_and_buffer, 1; "testcase-sdk")] - #[test_case(setup_transaction_view_receive_and_buffer, 0; "testcase-view")] + #[test_case(setup_sanitized_transaction_receive_and_buffer; "testcase-sdk")] + #[test_case(setup_transaction_view_receive_and_buffer; "testcase-view")] fn test_receive_and_buffer_no_hold( setup_receive_and_buffer: impl FnOnce( Receiver, Arc>, ) -> (R, R::Container), - 
expected_num_received: usize, ) { let (sender, receiver) = unbounded(); let (bank_forks, mint_keypair) = test_bank_forks(); let (mut receive_and_buffer, mut container) = setup_receive_and_buffer(receiver, bank_forks.clone()); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let mut count_metrics = SchedulerCountMetrics::default(); let transaction = transfer( &mint_keypair, @@ -803,19 +940,36 @@ mod tests { let packet_batches = Arc::new(to_packet_batches(&[transaction], 1)); sender.send(packet_batches).unwrap(); - let num_received = receive_and_buffer + let ReceivingStats { + num_received, + num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: _, + buffer_time_us: _, + } = receive_and_buffer .receive_and_buffer_packets( &mut container, - &mut timing_metrics, - &mut count_metrics, &BufferedPacketsDecision::Forward, // no packets should be held ) .unwrap(); - // Currently the different approaches have slightly different accounting. 
- // - sdk: all valid deserializable packets count as received - // - view: immediately drops all packets without counting due to decision - assert_eq!(num_received, expected_num_received); + assert_eq!(num_received, 1); + assert_eq!(num_dropped_without_parsing, 1); + assert_eq!(num_dropped_on_parsing_and_sanitization, 0); + assert_eq!(num_dropped_on_lock_validation, 0); + assert_eq!(num_dropped_on_compute_budget, 0); + assert_eq!(num_dropped_on_age, 0); + assert_eq!(num_dropped_on_already_processed, 0); + assert_eq!(num_dropped_on_fee_payer, 0); + assert_eq!(num_dropped_on_capacity, 0); + assert_eq!(num_buffered, 0); verify_container(&mut container, 0); } @@ -831,8 +985,6 @@ mod tests { let (bank_forks, mint_keypair) = test_bank_forks(); let (mut receive_and_buffer, mut container) = setup_receive_and_buffer(receiver, bank_forks.clone()); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let mut count_metrics = SchedulerCountMetrics::default(); let transaction = transfer( &mint_keypair, @@ -848,53 +1000,83 @@ mod tests { .set_discard(true); sender.send(packet_batches).unwrap(); - let num_received = receive_and_buffer - .receive_and_buffer_packets( - &mut container, - &mut timing_metrics, - &mut count_metrics, - &BufferedPacketsDecision::Hold, - ) + let ReceivingStats { + num_received, + num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: _, + buffer_time_us: _, + } = receive_and_buffer + .receive_and_buffer_packets(&mut container, &BufferedPacketsDecision::Hold) .unwrap(); assert_eq!(num_received, 0); + assert_eq!(num_dropped_without_parsing, 0); + assert_eq!(num_dropped_on_parsing_and_sanitization, 0); + assert_eq!(num_dropped_on_lock_validation, 0); + assert_eq!(num_dropped_on_compute_budget, 0); + 
assert_eq!(num_dropped_on_age, 0); + assert_eq!(num_dropped_on_already_processed, 0); + assert_eq!(num_dropped_on_fee_payer, 0); + assert_eq!(num_dropped_on_capacity, 0); + assert_eq!(num_buffered, 0); + verify_container(&mut container, 0); } - #[test_case(setup_sanitized_transaction_receive_and_buffer, 0; "testcase-sdk")] - #[test_case(setup_transaction_view_receive_and_buffer, 1; "testcase-view")] + #[test_case(setup_sanitized_transaction_receive_and_buffer; "testcase-sdk")] + #[test_case(setup_transaction_view_receive_and_buffer; "testcase-view")] fn test_receive_and_buffer_invalid_transaction_format( setup_receive_and_buffer: impl FnOnce( Receiver, Arc>, ) -> (R, R::Container), - expected_num_received: usize, ) { let (sender, receiver) = unbounded(); let (bank_forks, _mint_keypair) = test_bank_forks(); let (mut receive_and_buffer, mut container) = setup_receive_and_buffer(receiver, bank_forks.clone()); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let mut count_metrics = SchedulerCountMetrics::default(); let packet_batches = Arc::new(vec![PacketBatch::from(PinnedPacketBatch::new(vec![ Packet::new([1u8; PACKET_DATA_SIZE], Meta::default()), ]))]); sender.send(packet_batches).unwrap(); - let num_received = receive_and_buffer - .receive_and_buffer_packets( - &mut container, - &mut timing_metrics, - &mut count_metrics, - &BufferedPacketsDecision::Hold, - ) + let ReceivingStats { + num_received, + num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: _, + buffer_time_us: _, + } = receive_and_buffer + .receive_and_buffer_packets(&mut container, &BufferedPacketsDecision::Hold) .unwrap(); - // Currently the different approaches have slightly different accounting. 
- // - sdk: only valid deserializable packets count as received - // - view: all valid packets count as received, even if invalid tx format - assert_eq!(num_received, expected_num_received); + assert_eq!(num_received, 1); + assert_eq!(num_dropped_without_parsing, 0); + assert_eq!(num_dropped_on_parsing_and_sanitization, 1); + assert_eq!(num_dropped_on_lock_validation, 0); + assert_eq!(num_dropped_on_compute_budget, 0); + assert_eq!(num_dropped_on_age, 0); + assert_eq!(num_dropped_on_already_processed, 0); + assert_eq!(num_dropped_on_fee_payer, 0); + assert_eq!(num_dropped_on_capacity, 0); + assert_eq!(num_buffered, 0); + verify_container(&mut container, 0); } @@ -910,23 +1092,39 @@ mod tests { let (bank_forks, mint_keypair) = test_bank_forks(); let (mut receive_and_buffer, mut container) = setup_receive_and_buffer(receiver, bank_forks.clone()); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let mut count_metrics = SchedulerCountMetrics::default(); let transaction = transfer(&mint_keypair, &Pubkey::new_unique(), 1, Hash::new_unique()); let packet_batches = Arc::new(to_packet_batches(&[transaction], 1)); sender.send(packet_batches).unwrap(); - let num_received = receive_and_buffer - .receive_and_buffer_packets( - &mut container, - &mut timing_metrics, - &mut count_metrics, - &BufferedPacketsDecision::Hold, - ) + let ReceivingStats { + num_received, + num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: _, + buffer_time_us: _, + } = receive_and_buffer + .receive_and_buffer_packets(&mut container, &BufferedPacketsDecision::Hold) .unwrap(); assert_eq!(num_received, 1); + assert_eq!(num_dropped_without_parsing, 0); + assert_eq!(num_dropped_on_parsing_and_sanitization, 0); + assert_eq!(num_dropped_on_lock_validation, 0); + 
assert_eq!(num_dropped_on_compute_budget, 0); + assert_eq!(num_dropped_on_age, 1); + assert_eq!(num_dropped_on_already_processed, 0); + assert_eq!(num_dropped_on_fee_payer, 0); + assert_eq!(num_dropped_on_capacity, 0); + assert_eq!(num_buffered, 0); + verify_container(&mut container, 0); } @@ -942,8 +1140,6 @@ mod tests { let (bank_forks, _mint_keypair) = test_bank_forks(); let (mut receive_and_buffer, mut container) = setup_receive_and_buffer(receiver, bank_forks.clone()); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let mut count_metrics = SchedulerCountMetrics::default(); let transaction = transfer( &Keypair::new(), @@ -954,16 +1150,34 @@ mod tests { let packet_batches = Arc::new(to_packet_batches(&[transaction], 1)); sender.send(packet_batches).unwrap(); - let num_received = receive_and_buffer - .receive_and_buffer_packets( - &mut container, - &mut timing_metrics, - &mut count_metrics, - &BufferedPacketsDecision::Hold, - ) + let ReceivingStats { + num_received, + num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: _, + buffer_time_us: _, + } = receive_and_buffer + .receive_and_buffer_packets(&mut container, &BufferedPacketsDecision::Hold) .unwrap(); assert_eq!(num_received, 1); + assert_eq!(num_dropped_without_parsing, 0); + assert_eq!(num_dropped_on_parsing_and_sanitization, 0); + assert_eq!(num_dropped_on_lock_validation, 0); + assert_eq!(num_dropped_on_compute_budget, 0); + assert_eq!(num_dropped_on_age, 0); + assert_eq!(num_dropped_on_already_processed, 0); + assert_eq!(num_dropped_on_fee_payer, 1); + assert_eq!(num_dropped_on_capacity, 0); + assert_eq!(num_buffered, 0); + verify_container(&mut container, 0); } @@ -979,8 +1193,6 @@ mod tests { let (bank_forks, mint_keypair) = test_bank_forks(); let (mut 
receive_and_buffer, mut container) = setup_receive_and_buffer(receiver, bank_forks.clone()); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let mut count_metrics = SchedulerCountMetrics::default(); let to_pubkey = Pubkey::new_unique(); let transaction = VersionedTransaction::try_new( @@ -1006,16 +1218,34 @@ mod tests { let packet_batches = Arc::new(to_packet_batches(&[transaction], 1)); sender.send(packet_batches).unwrap(); - let num_received = receive_and_buffer - .receive_and_buffer_packets( - &mut container, - &mut timing_metrics, - &mut count_metrics, - &BufferedPacketsDecision::Hold, - ) + let ReceivingStats { + num_received, + num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: _, + buffer_time_us: _, + } = receive_and_buffer + .receive_and_buffer_packets(&mut container, &BufferedPacketsDecision::Hold) .unwrap(); assert_eq!(num_received, 1); + assert_eq!(num_dropped_without_parsing, 0); + assert_eq!(num_dropped_on_parsing_and_sanitization, 1); + assert_eq!(num_dropped_on_lock_validation, 0); + assert_eq!(num_dropped_on_compute_budget, 0); + assert_eq!(num_dropped_on_age, 0); + assert_eq!(num_dropped_on_already_processed, 0); + assert_eq!(num_dropped_on_fee_payer, 0); + assert_eq!(num_dropped_on_capacity, 0); + assert_eq!(num_buffered, 0); + verify_container(&mut container, 0); } @@ -1031,8 +1261,6 @@ mod tests { let (bank_forks, mint_keypair) = test_bank_forks(); let (mut receive_and_buffer, mut container) = setup_receive_and_buffer(receiver, bank_forks.clone()); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let mut count_metrics = SchedulerCountMetrics::default(); let transaction = transfer( &mint_keypair, @@ -1043,16 +1271,34 @@ mod tests { let packet_batches = 
Arc::new(to_packet_batches(&[transaction], 1)); sender.send(packet_batches).unwrap(); - let num_received = receive_and_buffer - .receive_and_buffer_packets( - &mut container, - &mut timing_metrics, - &mut count_metrics, - &BufferedPacketsDecision::Hold, - ) + let ReceivingStats { + num_received, + num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: _, + buffer_time_us: _, + } = receive_and_buffer + .receive_and_buffer_packets(&mut container, &BufferedPacketsDecision::Hold) .unwrap(); assert_eq!(num_received, 1); + assert_eq!(num_dropped_without_parsing, 0); + assert_eq!(num_dropped_on_parsing_and_sanitization, 0); + assert_eq!(num_dropped_on_lock_validation, 0); + assert_eq!(num_dropped_on_compute_budget, 0); + assert_eq!(num_dropped_on_age, 0); + assert_eq!(num_dropped_on_already_processed, 0); + assert_eq!(num_dropped_on_fee_payer, 0); + assert_eq!(num_dropped_on_capacity, 0); + assert_eq!(num_buffered, 1); + verify_container(&mut container, 1); } @@ -1068,8 +1314,6 @@ mod tests { let (bank_forks, mint_keypair) = test_bank_forks(); let (mut receive_and_buffer, mut container) = setup_receive_and_buffer(receiver, bank_forks.clone()); - let mut timing_metrics = SchedulerTimingMetrics::default(); - let mut count_metrics = SchedulerCountMetrics::default(); let num_transactions = 3 * TEST_CONTAINER_CAPACITY; let transactions = Vec::from_iter((0..num_transactions).map(|_| { @@ -1084,16 +1328,34 @@ mod tests { let packet_batches = Arc::new(to_packet_batches(&transactions, 17)); sender.send(packet_batches).unwrap(); - let num_received = receive_and_buffer - .receive_and_buffer_packets( - &mut container, - &mut timing_metrics, - &mut count_metrics, - &BufferedPacketsDecision::Hold, - ) + let ReceivingStats { + num_received, + 
num_dropped_without_parsing, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: _, + buffer_time_us: _, + } = receive_and_buffer + .receive_and_buffer_packets(&mut container, &BufferedPacketsDecision::Hold) .unwrap(); assert_eq!(num_received, num_transactions); + assert_eq!(num_dropped_without_parsing, 0); + assert_eq!(num_dropped_on_parsing_and_sanitization, 0); + assert_eq!(num_dropped_on_lock_validation, 0); + assert_eq!(num_dropped_on_compute_budget, 0); + assert_eq!(num_dropped_on_age, 0); + assert_eq!(num_dropped_on_already_processed, 0); + assert_eq!(num_dropped_on_fee_payer, 0); + assert!(num_dropped_on_capacity > 0); + assert_eq!(num_buffered, num_transactions); + verify_container(&mut container, TEST_CONTAINER_CAPACITY); } } diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_common.rs b/core/src/banking_stage/transaction_scheduler/scheduler_common.rs index 730264ffcea41d..6f2f5fd8c88451 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_common.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_common.rs @@ -242,9 +242,13 @@ impl SchedulingCommon { // Assumption - retryable indexes are in order (sorted by workers). 
let mut retryable_iter = retryable_indexes.iter().peekable(); for (index, (id, transaction)) in izip!(ids, transactions).enumerate() { - if let Some(&&retryable_index) = retryable_iter.peek() { - if retryable_index == index { - container.retry_transaction(id, transaction); + if let Some(&retryable_index) = retryable_iter.peek() { + if retryable_index.index == index { + container.retry_transaction( + id, + transaction, + retryable_index.immediately_retryable, + ); retryable_iter.next(); continue; } @@ -254,7 +258,11 @@ impl SchedulingCommon { debug_assert!( retryable_iter.peek().is_none(), - "retryable indexes were not in order: {retryable_indexes:?}" + "retryable indexes were not in order: {:?}", + retryable_indexes + .iter() + .map(|index| index.index) + .collect::>(), ); Ok((num_transactions, num_retryable)) @@ -290,11 +298,18 @@ impl SchedulingCommon { mod tests { use { super::*, - crate::banking_stage::transaction_scheduler::transaction_state_container::TransactionStateContainer, - crossbeam_channel::unbounded, solana_hash::Hash, solana_keypair::Keypair, - solana_pubkey::Pubkey, solana_runtime_transaction::runtime_transaction::RuntimeTransaction, + crate::banking_stage::{ + consumer::RetryableIndex, + transaction_scheduler::transaction_state_container::TransactionStateContainer, + }, + crossbeam_channel::unbounded, + solana_hash::Hash, + solana_keypair::Keypair, + solana_pubkey::Pubkey, + solana_runtime_transaction::runtime_transaction::RuntimeTransaction, solana_system_transaction as system_transaction, - solana_transaction::sanitized::SanitizedTransaction, test_case::test_case, + solana_transaction::sanitized::SanitizedTransaction, + test_case::test_case, }; const NUM_WORKERS: usize = 4; @@ -521,17 +536,22 @@ mod tests { let num_scheduled = common.send_batch(0).unwrap(); let work = work_receivers[0].try_recv().unwrap(); assert_eq!(work.ids.len(), num_scheduled); - let retryable_indexes = vec![0, 1]; + let retryable_indexes = vec![ + RetryableIndex::new(0, 
true), + RetryableIndex::new(1, false), // should be held by container. + ]; + let expected_num_retryable = retryable_indexes.len(); let finished_work = FinishedConsumeWork { work, - retryable_indexes: retryable_indexes.clone(), + retryable_indexes, }; finished_work_sender.send(finished_work).unwrap(); let (num_transactions, num_retryable) = common.try_receive_completed(&mut container).unwrap(); assert_eq!(num_transactions, num_scheduled); - assert_eq!(num_retryable, retryable_indexes.len()); - assert_eq!(container.buffer_size(), retryable_indexes.len()); + assert_eq!(num_retryable, expected_num_retryable); + assert_eq!(container.buffer_size(), expected_num_retryable); + assert_eq!(container.queue_size(), expected_num_retryable - 1); // held transaction not in queue. } #[test] @@ -550,10 +570,10 @@ mod tests { let num_scheduled = common.send_batch(0).unwrap(); let work = work_receivers[0].try_recv().unwrap(); assert_eq!(work.ids.len(), num_scheduled); - let retryable_indexes = vec![1, 0]; + let retryable_indexes = vec![RetryableIndex::new(1, true), RetryableIndex::new(0, true)]; let finished_work = FinishedConsumeWork { work, - retryable_indexes: retryable_indexes.clone(), + retryable_indexes, }; finished_work_sender.send(finished_work).unwrap(); diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index e32b1f5d690eca..7088e2ab550ced 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -6,16 +6,15 @@ use { receive_and_buffer::{DisconnectedError, ReceiveAndBuffer}, scheduler::{PreLockFilterAction, Scheduler}, scheduler_error::SchedulerError, - scheduler_metrics::{ - SchedulerCountMetrics, SchedulerLeaderDetectionMetrics, SchedulerTimingMetrics, - SchedulingDetails, - }, + scheduler_metrics::{SchedulerCountMetrics, SchedulerTimingMetrics, SchedulingDetails}, }, 
crate::banking_stage::{ consume_worker::ConsumeWorkerMetrics, consumer::Consumer, decision_maker::{BufferedPacketsDecision, DecisionMaker}, - transaction_scheduler::transaction_state_container::StateContainer, + transaction_scheduler::{ + receive_and_buffer::ReceivingStats, transaction_state_container::StateContainer, + }, TOTAL_BUFFERED_PACKETS, }, solana_clock::MAX_PROCESSING_AGE, @@ -24,7 +23,10 @@ use { solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ num::Saturating, - sync::{Arc, RwLock}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, RwLock, + }, }, }; @@ -34,6 +36,8 @@ where R: ReceiveAndBuffer, S: Scheduler, { + /// Exit signal for the scheduler thread. + exit: Arc, /// Decision maker for determining what should be done with transactions. decision_maker: DecisionMaker, receive_and_buffer: R, @@ -43,8 +47,6 @@ where container: R::Container, /// State for scheduling and communicating with worker threads. scheduler: S, - /// Metrics tracking time for leader bank detection. - leader_detection_metrics: SchedulerLeaderDetectionMetrics, /// Metrics tracking counts on transactions in different states /// over an interval and during a leader slot. 
count_metrics: SchedulerCountMetrics, @@ -63,6 +65,7 @@ where S: Scheduler, { pub fn new( + exit: Arc, decision_maker: DecisionMaker, receive_and_buffer: R, bank_forks: Arc>, @@ -70,12 +73,12 @@ where worker_metrics: Vec>, ) -> Self { Self { + exit, decision_maker, receive_and_buffer, bank_forks, container: R::Container::with_capacity(TOTAL_BUFFERED_PACKETS), scheduler, - leader_detection_metrics: SchedulerLeaderDetectionMetrics::default(), count_metrics: SchedulerCountMetrics::default(), timing_metrics: SchedulerTimingMetrics::default(), worker_metrics, @@ -84,7 +87,8 @@ where } pub fn run(mut self) -> Result<(), SchedulerError> { - loop { + let mut last_slot = None; + while !self.exit.load(Ordering::Relaxed) { // BufferedPacketsDecision is shared with legacy BankingStage, which will forward // packets. Initially, not renaming these decision variants but the actions taken // are different, since new BankingStage will not forward packets. @@ -100,15 +104,17 @@ where self.timing_metrics.update(|timing_metrics| { timing_metrics.decision_time_us += decision_time_us; }); - let new_leader_slot = decision.bank_start().map(|b| b.working_bank.slot()); - self.leader_detection_metrics - .update_and_maybe_report(decision.bank_start()); + let new_leader_slot = decision.bank().map(|b| b.slot()); self.count_metrics .maybe_report_and_reset_slot(new_leader_slot); self.timing_metrics .maybe_report_and_reset_slot(new_leader_slot); self.receive_completed()?; + if last_slot != new_leader_slot { + self.container.flush_held_transactions(); + last_slot = new_leader_slot; + } self.process_transactions(&decision)?; if self.receive_and_buffer_packets(&decision).is_err() { break; @@ -139,16 +145,11 @@ where decision: &BufferedPacketsDecision, ) -> Result<(), SchedulerError> { match decision { - BufferedPacketsDecision::Consume(bank_start) => { + BufferedPacketsDecision::Consume(bank) => { let (scheduling_summary, schedule_time_us) = measure_us!(self.scheduler.schedule( &mut self.container, 
|txs, results| { - Self::pre_graph_filter( - txs, - results, - &bank_start.working_bank, - MAX_PROCESSING_AGE, - ) + Self::pre_graph_filter(txs, results, bank, MAX_PROCESSING_AGE) }, |_| PreLockFilterAction::AttemptToSchedule // no pre-lock filter for now )?); @@ -246,7 +247,7 @@ where const CHUNK_SIZE: usize = 128; let mut error_counters = TransactionErrorMetrics::default(); - let mut num_dropped_on_age_and_status = Saturating::(0); + let mut num_dropped_on_clean = Saturating::(0); for chunk in transaction_ids.chunks(CHUNK_SIZE) { let lock_results = vec![Ok(()); chunk.len()]; let sanitized_txs: Vec<_> = chunk @@ -268,7 +269,7 @@ where // Remove errored transactions for (result, id) in check_results.iter().zip(chunk.iter()) { if result.is_err() { - num_dropped_on_age_and_status += 1; + num_dropped_on_clean += 1; self.container.remove_by_id(id.id); } } @@ -284,7 +285,7 @@ where } self.count_metrics.update(|count_metrics| { - count_metrics.num_dropped_on_age_and_status += num_dropped_on_age_and_status; + count_metrics.num_dropped_on_clean += num_dropped_on_clean; }); } @@ -308,13 +309,47 @@ where fn receive_and_buffer_packets( &mut self, decision: &BufferedPacketsDecision, - ) -> Result { - self.receive_and_buffer.receive_and_buffer_packets( - &mut self.container, - &mut self.timing_metrics, - &mut self.count_metrics, - decision, - ) + ) -> Result { + let receiving_stats = self + .receive_and_buffer + .receive_and_buffer_packets(&mut self.container, decision)?; + + self.count_metrics.update(|count_metrics| { + let ReceivingStats { + num_received, + num_dropped_without_parsing: num_dropped_without_buffering, + num_dropped_on_parsing_and_sanitization, + num_dropped_on_lock_validation, + num_dropped_on_compute_budget, + num_dropped_on_age, + num_dropped_on_already_processed, + num_dropped_on_fee_payer, + num_dropped_on_capacity, + num_buffered, + receive_time_us: _, + buffer_time_us: _, + } = &receiving_stats; + + count_metrics.num_received += *num_received; + 
count_metrics.num_dropped_on_receive += *num_dropped_without_buffering; + count_metrics.num_dropped_on_parsing_and_sanitization += + *num_dropped_on_parsing_and_sanitization; + count_metrics.num_dropped_on_validate_locks += *num_dropped_on_lock_validation; + count_metrics.num_dropped_on_receive_compute_budget += *num_dropped_on_compute_budget; + count_metrics.num_dropped_on_receive_age += *num_dropped_on_age; + count_metrics.num_dropped_on_receive_already_processed += + *num_dropped_on_already_processed; + count_metrics.num_dropped_on_receive_fee_payer += *num_dropped_on_fee_payer; + count_metrics.num_dropped_on_capacity += *num_dropped_on_capacity; + count_metrics.num_buffered += *num_buffered; + }); + + self.timing_metrics.update(|timing_metrics| { + timing_metrics.receive_time_us += receiving_stats.receive_time_us; + timing_metrics.buffer_time_us += receiving_stats.buffer_time_us; + }); + + Ok(receiving_stats) } } @@ -323,7 +358,7 @@ mod tests { use { super::*, crate::banking_stage::{ - consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, + consumer::{RetryableIndex, TARGET_NUM_TRANSACTIONS_PER_BATCH}, packet_deserializer::PacketDeserializer, scheduler_messages::{ConsumeWork, FinishedConsumeWork, TransactionBatchId}, tests::create_slow_genesis_config, @@ -340,22 +375,19 @@ mod tests { solana_fee_calculator::FeeRateGovernor, solana_hash::Hash, solana_keypair::Keypair, - solana_ledger::{ - blockstore::Blockstore, genesis_utils::GenesisConfigInfo, - get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, - }, + solana_ledger::genesis_utils::GenesisConfigInfo, solana_message::Message, solana_perf::packet::{to_packet_batches, PacketBatch, NUM_PACKETS}, - solana_poh::poh_recorder::PohRecorder, - solana_poh_config::PohConfig, + solana_poh::poh_recorder::{ + SharedLeaderFirstTickHeight, SharedTickHeight, SharedWorkingBank, + }, solana_pubkey::Pubkey, solana_runtime::bank::Bank, solana_runtime_transaction::transaction_meta::StaticMeta, solana_signer::Signer, 
solana_system_interface::instruction as system_instruction, solana_transaction::Transaction, - std::sync::{atomic::AtomicBool, Arc, RwLock}, - tempfile::TempDir, + std::sync::{Arc, RwLock}, test_case::test_case, }; @@ -368,10 +400,8 @@ mod tests { struct TestFrame { bank: Arc, mint_keypair: Keypair, - _ledger_path: TempDir, - poh_recorder: Arc>, banking_packet_sender: Sender>>, - + shared_working_bank: SharedWorkingBank, consume_work_receivers: Vec>>, finished_consume_work_sender: Sender>, } @@ -409,22 +439,15 @@ mod tests { genesis_config.fee_rate_governor = FeeRateGovernor::new(5000, 0); let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); - let ledger_path = get_tmp_ledger_path_auto_delete!(); - let blockstore = Blockstore::open(ledger_path.path()) - .expect("Expected to be able to open database ledger"); - let (poh_recorder, _entry_receiver) = PohRecorder::new( - bank.tick_height(), - bank.last_blockhash(), - bank.clone(), - Some((4, 4)), - bank.ticks_per_slot(), - Arc::new(blockstore), - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), - &PohConfig::default(), - Arc::new(AtomicBool::default()), + let shared_working_bank = SharedWorkingBank::empty(); + let shared_tick_height = SharedTickHeight::new(0); + let shared_leader_first_tick_height = SharedLeaderFirstTickHeight::new(None); + + let decision_maker = DecisionMaker::new( + shared_working_bank.clone(), + shared_tick_height, + shared_leader_first_tick_height, ); - let poh_recorder = Arc::new(RwLock::new(poh_recorder)); - let decision_maker = DecisionMaker::new(poh_recorder.clone()); let (banking_packet_sender, banking_packet_receiver) = unbounded(); let receive_and_buffer = @@ -436,8 +459,7 @@ mod tests { let test_frame = TestFrame { bank, mint_keypair, - _ledger_path: ledger_path, - poh_recorder, + shared_working_bank, banking_packet_sender, consume_work_receivers, finished_consume_work_sender, @@ -448,7 +470,9 @@ mod tests { finished_consume_work_receiver, 
PrioGraphSchedulerConfig::default(), ); + let exit = Arc::new(AtomicBool::new(false)); let scheduler_controller = SchedulerController::new( + exit, decision_maker, receive_and_buffer, bank_forks, @@ -511,7 +535,7 @@ mod tests { // from the channel. while scheduler_controller .receive_and_buffer_packets(&decision) - .map(|n| n > 0) + .map(|n| n.num_received > 0) .unwrap_or_default() {} assert!(scheduler_controller.process_transactions(&decision).is_ok()); @@ -549,21 +573,18 @@ mod tests { fn test_schedule_consume_single_threaded_no_conflicts( create_receive_and_buffer: impl FnOnce(BankingPacketReceiver, Arc>) -> R, ) { - let (test_frame, mut scheduler_controller) = + let (mut test_frame, mut scheduler_controller) = create_test_frame(1, create_receive_and_buffer); let TestFrame { bank, mint_keypair, - poh_recorder, + shared_working_bank, banking_packet_sender, consume_work_receivers, .. - } = &test_frame; + } = &mut test_frame; - poh_recorder - .write() - .unwrap() - .set_bank_for_test(bank.clone()); + shared_working_bank.store(bank.clone()); // Send packet batch to the scheduler - should do nothing until we become the leader. let tx1 = create_and_fund_prioritized_transfer( @@ -609,21 +630,18 @@ mod tests { fn test_schedule_consume_single_threaded_conflict( create_receive_and_buffer: impl FnOnce(BankingPacketReceiver, Arc>) -> R, ) { - let (test_frame, mut scheduler_controller) = + let (mut test_frame, mut scheduler_controller) = create_test_frame(1, create_receive_and_buffer); let TestFrame { bank, mint_keypair, - poh_recorder, + shared_working_bank, banking_packet_sender, consume_work_receivers, .. 
- } = &test_frame; + } = &mut test_frame; - poh_recorder - .write() - .unwrap() - .set_bank_for_test(bank.clone()); + shared_working_bank.store(bank.clone()); let pk = Pubkey::new_unique(); let tx1 = create_and_fund_prioritized_transfer( @@ -672,21 +690,18 @@ mod tests { fn test_schedule_consume_single_threaded_multi_batch( create_receive_and_buffer: impl FnOnce(BankingPacketReceiver, Arc>) -> R, ) { - let (test_frame, mut scheduler_controller) = + let (mut test_frame, mut scheduler_controller) = create_test_frame(1, create_receive_and_buffer); let TestFrame { bank, mint_keypair, - poh_recorder, + shared_working_bank, banking_packet_sender, consume_work_receivers, .. - } = &test_frame; + } = &mut test_frame; - poh_recorder - .write() - .unwrap() - .set_bank_for_test(bank.clone()); + shared_working_bank.store(bank.clone()); // Send multiple batches - all get scheduled let txs1 = (0..2 * TARGET_NUM_TRANSACTIONS_PER_BATCH) @@ -740,21 +755,18 @@ mod tests { fn test_schedule_consume_simple_thread_selection( create_receive_and_buffer: impl FnOnce(BankingPacketReceiver, Arc>) -> R, ) { - let (test_frame, mut scheduler_controller) = + let (mut test_frame, mut scheduler_controller) = create_test_frame(2, create_receive_and_buffer); let TestFrame { bank, mint_keypair, - poh_recorder, + shared_working_bank, banking_packet_sender, consume_work_receivers, .. - } = &test_frame; + } = &mut test_frame; - poh_recorder - .write() - .unwrap() - .set_bank_for_test(bank.clone()); + shared_working_bank.store(bank.clone()); // Send 4 transactions w/o conflicts. 
2 should be scheduled on each thread let txs = (0..4) @@ -811,22 +823,19 @@ mod tests { fn test_schedule_consume_retryable( create_receive_and_buffer: impl FnOnce(BankingPacketReceiver, Arc>) -> R, ) { - let (test_frame, mut scheduler_controller) = + let (mut test_frame, mut scheduler_controller) = create_test_frame(1, create_receive_and_buffer); let TestFrame { bank, mint_keypair, - poh_recorder, + shared_working_bank, banking_packet_sender, consume_work_receivers, finished_consume_work_sender, .. - } = &test_frame; + } = &mut test_frame; - poh_recorder - .write() - .unwrap() - .set_bank_for_test(bank.clone()); + shared_working_bank.store(bank.clone()); // Send packet batch to the scheduler - should do nothing until we become the leader. let tx1 = create_and_fund_prioritized_transfer( @@ -870,7 +879,7 @@ mod tests { finished_consume_work_sender .send(FinishedConsumeWork { work: consume_work, - retryable_indexes: vec![1], + retryable_indexes: vec![RetryableIndex::new(1, true)], }) .unwrap(); diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs index fd93cfed8d5ad0..fb2ad716f1a105 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs @@ -2,7 +2,6 @@ use { super::scheduler::SchedulingSummary, itertools::MinMaxResult, solana_clock::Slot, - solana_poh::poh_recorder::BankStart, solana_time_utils::AtomicInterval, std::{ num::Saturating, @@ -69,16 +68,22 @@ pub struct SchedulerCountMetricsInner { /// Number of transactions that were immediately dropped on receive. pub num_dropped_on_receive: Saturating, /// Number of transactions that were dropped due to sanitization failure. - pub num_dropped_on_sanitization: Saturating, + pub num_dropped_on_parsing_and_sanitization: Saturating, /// Number of transactions that were dropped due to failed lock validation. 
pub num_dropped_on_validate_locks: Saturating, - /// Number of transactions that were dropped due to failed transaction - /// checks during receive. - pub num_dropped_on_receive_transaction_checks: Saturating, + /// Number of transactions that were dropped in checking compute budget configuration + /// during receive checks. + pub num_dropped_on_receive_compute_budget: Saturating, + /// Number of transactions that were dropped due to age/nonce during receive checks. + pub num_dropped_on_receive_age: Saturating, + /// Number of transactions that were dropped due to already processed during receive checks. + pub num_dropped_on_receive_already_processed: Saturating, + /// Number of transactions that were dropped on fee payer checks during receive checks. + pub num_dropped_on_receive_fee_payer: Saturating, /// Number of transactions that were dropped due to clearing. pub num_dropped_on_clear: Saturating, - /// Number of transactions that were dropped due to age and status checks. - pub num_dropped_on_age_and_status: Saturating, + /// Number of transactions that were dropped during cleaning. + pub num_dropped_on_clean: Saturating, /// Number of transactions that were dropped due to exceeded capacity. 
pub num_dropped_on_capacity: Saturating, /// Min prioritization fees in the transaction container @@ -125,12 +130,16 @@ impl SchedulerCountMetricsInner { num_finished: Saturating(num_finished), num_retryable: Saturating(num_retryable), num_dropped_on_receive: Saturating(num_dropped_on_receive), - num_dropped_on_sanitization: Saturating(num_dropped_on_sanitization), + num_dropped_on_parsing_and_sanitization: + Saturating(num_dropped_on_parsing_and_sanitization), num_dropped_on_validate_locks: Saturating(num_dropped_on_validate_locks), - num_dropped_on_receive_transaction_checks: - Saturating(num_dropped_on_receive_transaction_checks), + num_dropped_on_receive_compute_budget: Saturating(num_dropped_on_receive_compute_budget), + num_dropped_on_receive_age: Saturating(num_dropped_on_receive_age), + num_dropped_on_receive_already_processed: + Saturating(num_dropped_on_receive_already_processed), + num_dropped_on_receive_fee_payer: Saturating(num_dropped_on_receive_fee_payer), num_dropped_on_clear: Saturating(num_dropped_on_clear), - num_dropped_on_age_and_status: Saturating(num_dropped_on_age_and_status), + num_dropped_on_clean: Saturating(num_dropped_on_clean), num_dropped_on_capacity: Saturating(num_dropped_on_capacity), min_prioritization_fees: _min_prioritization_fees, max_prioritization_fees: _max_prioritization_fees, @@ -151,8 +160,8 @@ impl SchedulerCountMetricsInner { ("num_retryable", num_retryable, i64), ("num_dropped_on_receive", num_dropped_on_receive, i64), ( - "num_dropped_on_sanitization", - num_dropped_on_sanitization, + "num_dropped_on_parsing_and_sanitization", + num_dropped_on_parsing_and_sanitization, i64 ), ( @@ -161,14 +170,25 @@ impl SchedulerCountMetricsInner { i64 ), ( - "num_dropped_on_receive_transaction_checks", - num_dropped_on_receive_transaction_checks, + "num_dropped_on_receive_compute_budget", + num_dropped_on_receive_compute_budget, + i64 + ), + ("num_dropped_on_receive_age", num_dropped_on_receive_age, i64), + ( + 
"num_dropped_on_receive_already_processed", + num_dropped_on_receive_already_processed, + i64 + ), + ( + "num_dropped_on_receive_fee_payer", + num_dropped_on_receive_fee_payer, i64 ), ("num_dropped_on_clear", num_dropped_on_clear, i64), ( - "num_dropped_on_age_and_status", - num_dropped_on_age_and_status, + "num_dropped_on_clean", + num_dropped_on_clean, i64 ), ("num_dropped_on_capacity", num_dropped_on_capacity, i64), @@ -190,13 +210,6 @@ impl SchedulerCountMetricsInner { || self.num_schedule_filtered_out != Saturating(0) || self.num_finished != Saturating(0) || self.num_retryable != Saturating(0) - || self.num_dropped_on_receive != Saturating(0) - || self.num_dropped_on_sanitization != Saturating(0) - || self.num_dropped_on_validate_locks != Saturating(0) - || self.num_dropped_on_receive_transaction_checks != Saturating(0) - || self.num_dropped_on_clear != Saturating(0) - || self.num_dropped_on_age_and_status != Saturating(0) - || self.num_dropped_on_capacity != Saturating(0) } fn reset(&mut self) { @@ -209,11 +222,14 @@ impl SchedulerCountMetricsInner { self.num_finished = Saturating(0); self.num_retryable = Saturating(0); self.num_dropped_on_receive = Saturating(0); - self.num_dropped_on_sanitization = Saturating(0); + self.num_dropped_on_parsing_and_sanitization = Saturating(0); self.num_dropped_on_validate_locks = Saturating(0); - self.num_dropped_on_receive_transaction_checks = Saturating(0); + self.num_dropped_on_receive_compute_budget = Saturating(0); + self.num_dropped_on_receive_age = Saturating(0); + self.num_dropped_on_receive_already_processed = Saturating(0); + self.num_dropped_on_receive_fee_payer = Saturating(0); self.num_dropped_on_clear = Saturating(0); - self.num_dropped_on_age_and_status = Saturating(0); + self.num_dropped_on_clean = Saturating(0); self.num_dropped_on_capacity = Saturating(0); self.min_prioritization_fees = u64::MAX; self.max_prioritization_fees = 0; @@ -374,69 +390,6 @@ impl SchedulerTimingMetricsInner { } } 
-#[derive(Default)] -pub struct SchedulerLeaderDetectionMetrics { - inner: Option, -} - -struct SchedulerLeaderDetectionMetricsInner { - slot: Slot, - bank_creation_time: Instant, - bank_detected_time: Instant, -} - -impl SchedulerLeaderDetectionMetrics { - pub fn update_and_maybe_report(&mut self, bank_start: Option<&BankStart>) { - match (&self.inner, bank_start) { - (None, Some(bank_start)) => self.initialize_inner(bank_start), - (Some(_inner), None) => self.report_and_reset(), - (Some(inner), Some(bank_start)) if inner.slot != bank_start.working_bank.slot() => { - self.report_and_reset(); - self.initialize_inner(bank_start); - } - _ => {} - } - } - - fn initialize_inner(&mut self, bank_start: &BankStart) { - let bank_detected_time = Instant::now(); - self.inner = Some(SchedulerLeaderDetectionMetricsInner { - slot: bank_start.working_bank.slot(), - bank_creation_time: *bank_start.bank_creation_time, - bank_detected_time, - }); - } - - fn report_and_reset(&mut self) { - let SchedulerLeaderDetectionMetricsInner { - slot, - bank_creation_time, - bank_detected_time, - } = self.inner.take().expect("inner must be present"); - - let bank_detected_delay_us = bank_detected_time - .duration_since(bank_creation_time) - .as_micros() - .try_into() - .unwrap_or(i64::MAX); - let bank_detected_to_slot_end_detected_us = bank_detected_time - .elapsed() - .as_micros() - .try_into() - .unwrap_or(i64::MAX); - datapoint_info!( - "banking_stage_scheduler_leader_detection", - ("slot", slot, i64), - ("bank_detected_delay_us", bank_detected_delay_us, i64), - ( - "bank_detected_to_slot_end_detected_us", - bank_detected_to_slot_end_detected_us, - i64 - ), - ); - } -} - pub struct SchedulingDetails { pub last_report: Instant, pub num_schedule_calls: usize, diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index bfd1419dcf1420..dad2bfd2506dc6 100644 --- 
a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -44,6 +44,7 @@ pub(crate) struct TransactionStateContainer { capacity: usize, priority_queue: MinMaxHeap, id_to_transaction_state: Slab>, + held_transactions: Vec, } #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] @@ -71,13 +72,23 @@ pub(crate) trait StateContainer { /// Retries a transaction - inserts transaction back into map. /// This transitions the transaction to `Unprocessed` state. - fn retry_transaction(&mut self, transaction_id: TransactionId, transaction: Tx) { + fn retry_transaction( + &mut self, + transaction_id: TransactionId, + transaction: Tx, + immediately_retryable: bool, + ) { let transaction_state = self .get_mut_transaction_state(transaction_id) .expect("transaction must exist"); let priority_id = TransactionPriorityId::new(transaction_state.priority(), transaction_id); transaction_state.retry_transaction(transaction); - self.push_ids_into_queue(std::iter::once(priority_id)); + + if immediately_retryable { + self.push_ids_into_queue(std::iter::once(priority_id)); + } else { + self.hold_transaction(priority_id); + } } /// Pushes transaction ids into the priority queue. If the queue if full, @@ -91,9 +102,14 @@ pub(crate) trait StateContainer { priority_ids: impl Iterator, ) -> usize; + /// Hold the transaction until the next flush (next slot). + fn hold_transaction(&mut self, priority_id: TransactionPriorityId); + /// Remove transaction by id. 
fn remove_by_id(&mut self, id: TransactionId); + fn flush_held_transactions(&mut self); + fn get_min_max_priority(&self) -> MinMaxResult; #[cfg(feature = "dev-context-only-utils")] @@ -110,6 +126,7 @@ impl StateContainer for TransactionStateContainer StateContainer for TransactionStateContainer MinMaxResult { match self.priority_queue.peek_min() { Some(min) => match self.priority_queue.peek_max() { @@ -323,11 +350,21 @@ impl StateContainer for TransactionViewStateContainer { self.inner.push_ids_into_queue(priority_ids) } + #[inline] + fn hold_transaction(&mut self, priority_id: TransactionPriorityId) { + self.inner.hold_transaction(priority_id); + } + #[inline] fn remove_by_id(&mut self, id: TransactionId) { self.inner.remove_by_id(id); } + #[inline] + fn flush_held_transactions(&mut self) { + self.inner.flush_held_transactions(); + } + #[inline] fn get_min_max_priority(&self) -> MinMaxResult { self.inner.get_min_max_priority() diff --git a/core/src/banking_stage/unified_scheduler.rs b/core/src/banking_stage/unified_scheduler.rs index dd82ce73a52479..f8654d851d32c9 100644 --- a/core/src/banking_stage/unified_scheduler.rs +++ b/core/src/banking_stage/unified_scheduler.rs @@ -38,7 +38,11 @@ use { solana_poh::{poh_recorder::PohRecorder, transaction_recorder::TransactionRecorder}, solana_runtime::bank_forks::BankForks, solana_unified_scheduler_pool::{BankingStageHelper, DefaultSchedulerPool}, - std::sync::{Arc, RwLock}, + std::{ + num::NonZeroUsize, + ops::Deref, + sync::{Arc, RwLock}, + }, }; #[allow(dead_code)] @@ -49,12 +53,21 @@ pub(crate) fn ensure_banking_stage_setup( channels: &Channels, poh_recorder: &Arc>, transaction_recorder: TransactionRecorder, - num_threads: u32, + num_threads: NonZeroUsize, ) { - let root_bank = bank_forks.read().unwrap().sharable_root_bank(); + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); let unified_receiver = channels.unified_receiver().clone(); - let mut decision_maker = DecisionMaker::new(poh_recorder.clone()); 
- let banking_stage_monitor = Box::new(DecisionMakerWrapper::new(decision_maker.clone())); + + let (is_exited, decision_maker) = { + let poh_recorder = poh_recorder.read().unwrap(); + ( + poh_recorder.is_exited.clone(), + DecisionMaker::from(poh_recorder.deref()), + ) + }; + + let banking_stage_monitor = + Box::new(DecisionMakerWrapper::new(is_exited, decision_maker.clone())); let banking_packet_handler = Box::new( move |helper: &BankingStageHelper, batches: BankingPacketBatch| { let decision = decision_maker.make_consume_or_forward_decision(); @@ -64,7 +77,7 @@ pub(crate) fn ensure_banking_stage_setup( // by solScCleaner. return; } - let bank = root_bank.load(); + let bank = sharable_banks.root(); for batch in batches.iter() { // over-provision nevertheless some of packets could be invalid. let task_id_base = helper.generate_task_ids(batch.len()); @@ -91,7 +104,7 @@ pub(crate) fn ensure_banking_stage_setup( ); pool.register_banking_stage( - Some(num_threads.try_into().unwrap()), + Some(num_threads.get()), unified_receiver, banking_packet_handler, transaction_recorder, diff --git a/core/src/banking_stage/vote_worker.rs b/core/src/banking_stage/vote_worker.rs index 0d96ac37bae776..6a1002e672b25d 100644 --- a/core/src/banking_stage/vote_worker.rs +++ b/core/src/banking_stage/vote_worker.rs @@ -19,7 +19,7 @@ use { solana_accounts_db::account_locks::validate_account_locks, solana_clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, solana_measure::{measure::Measure, measure_us}, - solana_poh::poh_recorder::{BankStart, PohRecorderError}, + solana_poh::poh_recorder::PohRecorderError, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_runtime_transaction::{ runtime_transaction::RuntimeTransaction, transaction_with_meta::TransactionWithMeta, @@ -31,7 +31,10 @@ use { solana_transaction::sanitized::SanitizedTransaction, solana_transaction_error::TransactionError, std::{ - sync::{atomic::Ordering, Arc, RwLock}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, 
RwLock, + }, time::Instant, }, }; @@ -46,6 +49,7 @@ mod transaction { pub const UNPROCESSED_BUFFER_STEP_SIZE: usize = 16; pub struct VoteWorker { + exit: Arc, decision_maker: DecisionMaker, tpu_receiver: PacketReceiver, gossip_receiver: PacketReceiver, @@ -56,6 +60,7 @@ pub struct VoteWorker { impl VoteWorker { pub fn new( + exit: Arc, decision_maker: DecisionMaker, tpu_receiver: PacketReceiver, gossip_receiver: PacketReceiver, @@ -64,6 +69,7 @@ impl VoteWorker { consumer: Consumer, ) -> Self { Self { + exit, decision_maker, tpu_receiver, gossip_receiver, @@ -79,7 +85,7 @@ impl VoteWorker { let mut last_metrics_update = Instant::now(); - loop { + while !self.exit.load(Ordering::Relaxed) { if !self.storage.is_empty() || last_metrics_update.elapsed() >= SLOT_BOUNDARY_CHECK_PERIOD { @@ -121,7 +127,7 @@ impl VoteWorker { ) { let (decision, make_decision_us) = measure_us!(self.decision_maker.make_consume_or_forward_decision()); - let metrics_action = slot_metrics_tracker.check_leader_slot_boundary(decision.bank_start()); + let metrics_action = slot_metrics_tracker.check_leader_slot_boundary(decision.bank()); slot_metrics_tracker.increment_make_decision_us(make_decision_us); // Take metrics action before processing packets (potentially resetting the @@ -131,9 +137,9 @@ impl VoteWorker { slot_metrics_tracker.apply_action(metrics_action); match decision { - BufferedPacketsDecision::Consume(bank_start) => { + BufferedPacketsDecision::Consume(bank) => { let (_, consume_buffered_packets_us) = measure_us!(self.consume_buffered_packets( - &bank_start, + &bank, banking_stage_stats, slot_metrics_tracker, )); @@ -159,7 +165,7 @@ impl VoteWorker { fn consume_buffered_packets( &mut self, - bank_start: &BankStart, + bank: &Bank, banking_stage_stats: &BankingStageStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, ) { @@ -173,7 +179,7 @@ impl VoteWorker { let num_packets_to_process = self.storage.len(); let reached_end_of_slot = self.process_packets( - bank_start, + bank, &mut 
consumed_buffered_packets_count, &mut rebuffered_packet_count, banking_stage_stats, @@ -208,7 +214,7 @@ impl VoteWorker { // returns `true` if the end of slot is reached fn process_packets( &mut self, - bank_start: &BankStart, + bank: &Bank, consumed_buffered_packets_count: &mut usize, rebuffered_packet_count: &mut usize, banking_stage_stats: &BankingStageStats, @@ -217,7 +223,7 @@ impl VoteWorker { // Based on the stake distribution present in the supplied bank, drain the unprocessed votes // from each validator using a weighted random ordering. Votes from validators with // 0 stake are ignored. - let all_vote_packets = self.storage.drain_unprocessed(&bank_start.working_bank); + let all_vote_packets = self.storage.drain_unprocessed(bank); let mut reached_end_of_slot = false; let mut sanitized_transactions = Vec::with_capacity(UNPROCESSED_BUFFER_STEP_SIZE); @@ -228,7 +234,7 @@ impl VoteWorker { vote_packets.clear(); chunk.iter().for_each(|packet| { if consume_scan_should_process_packet( - &bank_start.working_bank, + bank, banking_stage_stats, packet, reached_end_of_slot, @@ -241,7 +247,7 @@ impl VoteWorker { }); if let Some(retryable_vote_indices) = self.do_process_packets( - bank_start, + bank, &mut reached_end_of_slot, &mut sanitized_transactions, banking_stage_stats, @@ -265,7 +271,7 @@ impl VoteWorker { fn do_process_packets( &self, - bank_start: &BankStart, + bank: &Bank, reached_end_of_slot: &mut bool, sanitized_transactions: &mut Vec>, banking_stage_stats: &BankingStageStats, @@ -280,8 +286,7 @@ impl VoteWorker { let (process_transactions_summary, process_packets_transactions_us) = measure_us!(self .process_packets_transactions( - &bank_start.working_bank, - &bank_start.bank_creation_time, + bank, sanitized_transactions, banking_stage_stats, slot_metrics_tracker, @@ -299,7 +304,7 @@ impl VoteWorker { .. 
} = process_transactions_summary; - if reached_max_poh_height || !bank_start.should_working_bank_still_be_processing_txs() { + if reached_max_poh_height || !bank.is_complete() { *reached_end_of_slot = true; } @@ -325,15 +330,13 @@ impl VoteWorker { fn process_packets_transactions( &self, - bank: &Arc, - bank_creation_time: &Instant, + bank: &Bank, sanitized_transactions: &[impl TransactionWithMeta], banking_stage_stats: &BankingStageStats, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, ) -> ProcessTransactionsSummary { - let (mut process_transactions_summary, process_transactions_us) = measure_us!( - self.process_transactions(bank, bank_creation_time, sanitized_transactions) - ); + let (mut process_transactions_summary, process_transactions_us) = + measure_us!(self.process_transactions(bank, sanitized_transactions)); slot_metrics_tracker.increment_process_transactions_us(process_transactions_us); banking_stage_stats .transaction_processing_elapsed @@ -381,8 +384,7 @@ impl VoteWorker { /// than the total number if max PoH height was reached and the bank halted fn process_transactions( &self, - bank: &Arc, - bank_creation_time: &Instant, + bank: &Bank, transactions: &[impl TransactionWithMeta], ) -> ProcessTransactionsSummary { let process_transaction_batch_output = self @@ -408,8 +410,7 @@ impl VoteWorker { total_transaction_counts .accumulate(&transaction_counts, commit_transactions_result.is_ok()); - let should_bank_still_be_processing_txs = - Bank::should_bank_still_be_processing_txs(bank_creation_time, bank.ns_per_slot); + let should_bank_still_be_processing_txs = bank.is_complete(); let reached_max_poh_height = match ( commit_transactions_result, should_bank_still_be_processing_txs, @@ -428,7 +429,10 @@ impl VoteWorker { ProcessTransactionsSummary { reached_max_poh_height, transaction_counts: total_transaction_counts, - retryable_transaction_indexes, + retryable_transaction_indexes: retryable_transaction_indexes + .into_iter() + .map(|retryable_index| 
retryable_index.index) + .collect(), cost_model_throttled_transactions_count, cost_model_us, execute_and_commit_timings, diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index b4c728d50d5588..378ce077c3da7f 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -27,7 +27,7 @@ use { }, solana_runtime::{ bank::Bank, - bank_forks::{BankForks, SharableBank}, + bank_forks::{BankForks, SharableBanks}, bank_hash_cache::{BankHashCache, DumpedSlotSubscription}, commitment::VOTE_THRESHOLD_SIZE, epoch_stakes::VersionedEpochStakes, @@ -203,14 +203,14 @@ impl ClusterInfoVoteListener { let (verified_vote_transactions_sender, verified_vote_transactions_receiver) = unbounded(); let listen_thread = { let exit = exit.clone(); - let root_bank = bank_forks.read().unwrap().sharable_root_bank(); + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); Builder::new() .name("solCiVoteLstnr".to_string()) .spawn(move || { let _ = Self::recv_loop( exit, &cluster_info, - root_bank, + sharable_banks, verified_packets_sender, verified_vote_transactions_sender, ); @@ -252,7 +252,7 @@ impl ClusterInfoVoteListener { fn recv_loop( exit: Arc, cluster_info: &ClusterInfo, - root_bank: SharableBank, + sharable_banks: SharableBanks, verified_packets_sender: BankingPacketSender, verified_vote_transactions_sender: VerifiedVoteTransactionsSender, ) -> Result<()> { @@ -261,7 +261,7 @@ impl ClusterInfoVoteListener { let votes = cluster_info.get_votes(&mut cursor); inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len()); if !votes.is_empty() { - let (vote_txs, packets) = Self::verify_votes(votes, &root_bank); + let (vote_txs, packets) = Self::verify_votes(votes, &sharable_banks); verified_vote_transactions_sender.send(vote_txs)?; verified_packets_sender.send(BankingPacketBatch::new(packets))?; } @@ -273,7 +273,7 @@ impl ClusterInfoVoteListener { #[allow(clippy::type_complexity)] fn 
verify_votes( votes: Vec, - root_bank: &SharableBank, + sharable_banks: &SharableBanks, ) -> (Vec, Vec) { let mut packet_batches = packet::to_packet_batches(&votes, 1); @@ -283,7 +283,7 @@ impl ClusterInfoVoteListener { /*reject_non_vote=*/ false, votes.len(), ); - let root_bank = root_bank.load(); + let root_bank = sharable_banks.root(); let epoch_schedule = root_bank.epoch_schedule(); votes .into_iter() @@ -1507,9 +1507,9 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank); - let root_bank = bank_forks.read().unwrap().sharable_root_bank(); + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); let votes = vec![]; - let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &root_bank); + let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &sharable_banks); assert!(vote_txs.is_empty()); assert!(packets.is_empty()); } @@ -1549,10 +1549,10 @@ mod tests { ); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank); - let root_bank = bank_forks.read().unwrap().sharable_root_bank(); + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); let vote_tx = test_vote_tx(voting_keypairs.first(), hash); let votes = vec![vote_tx]; - let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &root_bank); + let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &sharable_banks); assert_eq!(vote_txs.len(), 1); verify_packets_len(&packets, 1); } @@ -1575,12 +1575,12 @@ mod tests { ); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank); - let root_bank = bank_forks.read().unwrap().sharable_root_bank(); + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); let vote_tx = test_vote_tx(voting_keypairs.first(), hash); let mut bad_vote = vote_tx.clone(); bad_vote.signatures[0] = 
Signature::default(); let votes = vec![vote_tx.clone(), bad_vote, vote_tx]; - let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &root_bank); + let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &sharable_banks); assert_eq!(vote_txs.len(), 2); verify_packets_len(&packets, 2); } diff --git a/core/src/cluster_slots_service/cluster_slots.rs b/core/src/cluster_slots_service/cluster_slots.rs index 8f9624d1974d31..a712f88a158052 100644 --- a/core/src/cluster_slots_service/cluster_slots.rs +++ b/core/src/cluster_slots_service/cluster_slots.rs @@ -74,7 +74,7 @@ struct RootEpoch { number: Epoch, schedule: EpochSchedule, } -#[derive(Default)] + pub struct ClusterSlots { // ring buffer storing, per slot, which stakes were committed to a certain slot. cluster_slots: RwLock>, @@ -95,6 +95,31 @@ struct RowContent { } impl ClusterSlots { + pub fn new(root_bank: &Bank, cluster_info: &ClusterInfo) -> Self { + let cluster_slots = Self::default(); + cluster_slots.update(root_bank, cluster_info); + cluster_slots + } + + // Intentionally private default function to disallow uninitialized construction + fn default() -> Self { + Self { + cluster_slots: RwLock::new(VecDeque::new()), + epoch_metadata: RwLock::new(HashMap::new()), + current_slot: AtomicU64::default(), + root_epoch: RwLock::new(None), + cursor: Mutex::new(Cursor::default()), + metrics_last_report: AtomicInterval::default(), + metric_allocations: AtomicU64::default(), + metric_write_locks: AtomicU64::default(), + } + } + + #[cfg(feature = "dev-context-only-utils")] + pub fn default_for_tests() -> Self { + Self::default() + } + #[inline] pub(crate) fn lookup(&self, slot: Slot) -> Option> { let cluster_slots = self.cluster_slots.read().unwrap(); diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index ec55bfaae15539..c6f59efa6181c8 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -467,7 +467,7 @@ mod tests { 
process_slot_vote_unchecked(&mut vote_state1, 3); process_slot_vote_unchecked(&mut vote_state1, 5); if !with_node_vote_state { - let versioned = VoteStateVersions::new_current(vote_state1.clone()); + let versioned = VoteStateVersions::new_v3(vote_state1.clone()); vote_state::to(&versioned, &mut vote_account1).unwrap(); bank.store_account(&pk1, &vote_account1); } @@ -475,19 +475,19 @@ mod tests { let mut vote_state2 = vote_state::from(&vote_account2).unwrap(); process_slot_vote_unchecked(&mut vote_state2, 9); process_slot_vote_unchecked(&mut vote_state2, 10); - let versioned = VoteStateVersions::new_current(vote_state2); + let versioned = VoteStateVersions::new_v3(vote_state2); vote_state::to(&versioned, &mut vote_account2).unwrap(); bank.store_account(&pk2, &vote_account2); let mut vote_state3 = vote_state::from(&vote_account3).unwrap(); vote_state3.root_slot = Some(1); - let versioned = VoteStateVersions::new_current(vote_state3); + let versioned = VoteStateVersions::new_v3(vote_state3); vote_state::to(&versioned, &mut vote_account3).unwrap(); bank.store_account(&pk3, &vote_account3); let mut vote_state4 = vote_state::from(&vote_account4).unwrap(); vote_state4.root_slot = Some(2); - let versioned = VoteStateVersions::new_current(vote_state4); + let versioned = VoteStateVersions::new_v3(vote_state4); vote_state::to(&versioned, &mut vote_account4).unwrap(); bank.store_account(&pk4, &vote_account4); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index d1da8af2a105ca..1bf1b964a72220 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -1788,7 +1788,7 @@ pub mod test { solana_slot_history::SlotHistory, solana_vote::vote_account::VoteAccount, solana_vote_program::vote_state::{ - process_slot_vote_unchecked, Vote, VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY, + process_slot_vote_unchecked, Vote, VoteStateV3, VoteStateVersions, MAX_LOCKOUT_HISTORY, }, std::{ collections::{HashMap, VecDeque}, @@ -1806,17 +1806,17 @@ pub mod test { .iter() 
.map(|(lamports, votes)| { let mut account = AccountSharedData::from(Account { - data: vec![0; VoteState::size_of()], + data: vec![0; VoteStateV3::size_of()], lamports: *lamports, owner: solana_vote_program::id(), ..Account::default() }); - let mut vote_state = VoteState::default(); + let mut vote_state = VoteStateV3::default(); for slot in *votes { process_slot_vote_unchecked(&mut vote_state, *slot); } - VoteState::serialize( - &VoteStateVersions::new_current(vote_state), + VoteStateV3::serialize( + &VoteStateVersions::new_v3(vote_state), account.data_as_mut_slice(), ) .expect("serialize state"); @@ -1979,6 +1979,7 @@ pub mod test { let duplicate_ancestor1 = 44; let duplicate_ancestor2 = 45; vote_simulator + .tbft_structs .heaviest_subtree_fork_choice .mark_fork_invalid_candidate(&( duplicate_ancestor1, @@ -1991,6 +1992,7 @@ pub mod test { .hash(), )); vote_simulator + .tbft_structs .heaviest_subtree_fork_choice .mark_fork_invalid_candidate(&( duplicate_ancestor2, @@ -2011,7 +2013,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchDuplicateRollback(duplicate_ancestor2) ); @@ -2025,6 +2027,7 @@ pub mod test { } for (i, duplicate_ancestor) in confirm_ancestors.into_iter().enumerate() { vote_simulator + .tbft_structs .heaviest_subtree_fork_choice .mark_fork_valid_candidate(&( duplicate_ancestor, @@ -2044,7 +2047,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ); if i == 0 { assert_eq!( @@ -2076,7 +2079,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - 
&vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::SameFork ); @@ -2091,7 +2094,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2108,7 +2111,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2125,7 +2128,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2142,7 +2145,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2161,7 +2164,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2178,7 +2181,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), 
SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -2196,7 +2199,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -2218,7 +2221,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2246,7 +2249,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, num_validators * 10000) ); @@ -2262,7 +2265,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -2295,7 +2298,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -2315,7 +2318,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -3035,7 +3038,7 @@ pub mod test { total_stake, 
bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::SameFork ); @@ -3050,7 +3053,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -3066,7 +3069,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -3126,7 +3129,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -3142,7 +3145,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20000) ); @@ -3158,7 +3161,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -3763,7 +3766,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + 
&vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20_000) ); @@ -3781,7 +3784,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); @@ -3819,7 +3822,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::FailedSwitchThreshold(0, 20_000) ); @@ -3839,7 +3842,7 @@ pub mod test { total_stake, bank0.epoch_vote_accounts(0).unwrap(), &vote_simulator.latest_validator_votes_for_frozen_banks, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, ), SwitchForkDecision::SwitchProof(Hash::default()) ); diff --git a/core/src/consensus/tower1_14_11.rs b/core/src/consensus/tower1_14_11.rs index 522c3660215d06..9914f2fad935ec 100644 --- a/core/src/consensus/tower1_14_11.rs +++ b/core/src/consensus/tower1_14_11.rs @@ -9,7 +9,7 @@ use { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "3MECXPvR1Tq3jRJkS1kCDuXDjqjgAkctaQ2avYnwKGE7") + frozen_abi(digest = "H3PUUxvCCu8MFwoXppzyJenXCJswSude7DiWK49yKKRo") )] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] pub struct Tower1_14_11 { diff --git a/core/src/consensus/tower1_7_14.rs b/core/src/consensus/tower1_7_14.rs index 6be66ff74d57ee..a3544bf4c2e4f8 100644 --- a/core/src/consensus/tower1_7_14.rs +++ b/core/src/consensus/tower1_7_14.rs @@ -10,7 +10,7 @@ use { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "FCocsqtauW2QfMz2hXDd5DCwJBw9NBFoq3CokeiDLMqv") + frozen_abi(digest = "FJeuD6UBJe9D8s6iWwianu6KcnJxSozHxDpPQMTrkNJK") )] 
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] pub struct Tower1_7_14 { @@ -41,7 +41,7 @@ pub struct Tower1_7_14 { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "2bgXEjDjQqkHrXtffNA6jusFLNt4rJBMVEsRctYABnn1") + frozen_abi(digest = "8ASxminStCjs2Xcx2b6arJQBENyMV3H62zSoTvsLqFqP") )] #[derive(Default, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct SavedTower1_7_14 { diff --git a/core/src/consensus/tower_storage.rs b/core/src/consensus/tower_storage.rs index 534ce829241f8b..0f785eb6f59096 100644 --- a/core/src/consensus/tower_storage.rs +++ b/core/src/consensus/tower_storage.rs @@ -78,7 +78,7 @@ impl From for SavedTowerVersions { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "GqJW8vVvSkSZwTJE6x6MFFhi7kcU6mqst8PF7493h2hk") + frozen_abi(digest = "8T1GVMzNNWcHRQwyzPhFj5nErdazaBe3ZGKQdY7T89Zo") )] #[derive(Default, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct SavedTower { @@ -219,155 +219,6 @@ impl TowerStorage for FileTowerStorage { } } -pub struct EtcdTowerStorage { - client: tokio::sync::Mutex, - instance_id: [u8; 8], - runtime: tokio::runtime::Runtime, -} - -pub struct EtcdTlsConfig { - pub domain_name: String, - pub ca_certificate: Vec, - pub identity_certificate: Vec, - pub identity_private_key: Vec, -} - -impl EtcdTowerStorage { - pub fn new, S: AsRef<[E]>>( - endpoints: S, - tls_config: Option, - ) -> Result { - let runtime = tokio::runtime::Builder::new_current_thread() - .enable_io() - .enable_time() - .build() - .unwrap(); - - let client = runtime - .block_on(etcd_client::Client::connect( - endpoints, - tls_config.map(|tls_config| { - etcd_client::ConnectOptions::default().with_tls( - etcd_client::TlsOptions::new() - .domain_name(tls_config.domain_name) - .ca_certificate(etcd_client::Certificate::from_pem( - tls_config.ca_certificate, - )) - .identity(etcd_client::Identity::from_pem( - tls_config.identity_certificate, - 
tls_config.identity_private_key, - )), - ) - }), - )) - .map_err(Self::etdc_to_tower_error)?; - - Ok(Self { - client: tokio::sync::Mutex::new(client), - instance_id: solana_time_utils::timestamp().to_le_bytes(), - runtime, - }) - } - - fn get_keys(node_pubkey: &Pubkey) -> (String, String) { - let instance_key = format!("{node_pubkey}/instance"); - let tower_key = format!("{node_pubkey}/tower"); - (instance_key, tower_key) - } - - fn etdc_to_tower_error(error: etcd_client::Error) -> TowerError { - TowerError::IoError(io::Error::other(error.to_string())) - } -} - -impl TowerStorage for EtcdTowerStorage { - fn load(&self, node_pubkey: &Pubkey) -> Result { - let (instance_key, tower_key) = Self::get_keys(node_pubkey); - - let txn = etcd_client::Txn::new().and_then(vec![etcd_client::TxnOp::put( - instance_key.clone(), - self.instance_id, - None, - )]); - self.runtime - .block_on(async { self.client.lock().await.txn(txn).await }) - .map_err(|err| { - error!("Failed to acquire etcd instance lock: {err}"); - Self::etdc_to_tower_error(err) - })?; - - let txn = etcd_client::Txn::new() - .when(vec![etcd_client::Compare::value( - instance_key, - etcd_client::CompareOp::Equal, - self.instance_id, - )]) - .and_then(vec![etcd_client::TxnOp::get(tower_key, None)]); - - let response = self - .runtime - .block_on(async { self.client.lock().await.txn(txn).await }) - .map_err(|err| { - error!("Failed to read etcd saved tower: {err}"); - Self::etdc_to_tower_error(err) - })?; - - if !response.succeeded() { - return Err(TowerError::IoError(io::Error::other(format!( - "Lost etcd instance lock for {node_pubkey}" - )))); - } - - for op_response in response.op_responses() { - if let etcd_client::TxnOpResponse::Get(get_response) = op_response { - if let Some(kv) = get_response.kvs().first() { - return bincode::deserialize_from(kv.value()) - .map_err(|e| e.into()) - .and_then(|t: SavedTowerVersions| t.try_into_tower(node_pubkey)); - } - } - } - - // Should never happen... 
- Err(TowerError::IoError(io::Error::other( - "Saved tower response missing".to_string(), - ))) - } - - fn store(&self, saved_tower: &SavedTowerVersions) -> Result<()> { - let (instance_key, tower_key) = Self::get_keys(&saved_tower.pubkey()); - - let txn = etcd_client::Txn::new() - .when(vec![etcd_client::Compare::value( - instance_key, - etcd_client::CompareOp::Equal, - self.instance_id, - )]) - .and_then(vec![etcd_client::TxnOp::put( - tower_key, - bincode::serialize(&saved_tower)?, - None, - )]); - - let response = self - .runtime - .block_on(async { self.client.lock().await.txn(txn).await }) - .map_err(|err| { - error!("Failed to write etcd saved tower: {err}"); - err - }) - .map_err(Self::etdc_to_tower_error)?; - - if !response.succeeded() { - return Err(TowerError::IoError(io::Error::other(format!( - "Lost etcd instance lock for {}", - saved_tower.pubkey() - )))); - } - Ok(()) - } -} - #[cfg(test)] pub mod test { use { @@ -380,7 +231,7 @@ pub mod test { solana_keypair::Keypair, solana_vote::vote_transaction::VoteTransaction, solana_vote_program::vote_state::{ - BlockTimestamp, LandedVote, Vote, VoteState, VoteState1_14_11, MAX_LOCKOUT_HISTORY, + BlockTimestamp, LandedVote, Vote, VoteState1_14_11, VoteStateV3, MAX_LOCKOUT_HISTORY, }, tempfile::TempDir, }; @@ -390,7 +241,7 @@ pub mod test { let tower_path = TempDir::new().unwrap(); let identity_keypair = Keypair::new(); let node_pubkey = identity_keypair.pubkey(); - let mut vote_state = VoteState::default(); + let mut vote_state = VoteStateV3::default(); vote_state .votes .resize(MAX_LOCKOUT_HISTORY, LandedVote::default()); diff --git a/core/src/forwarding_stage.rs b/core/src/forwarding_stage.rs index b11a7feba36458..7c92b401744d3d 100644 --- a/core/src/forwarding_stage.rs +++ b/core/src/forwarding_stage.rs @@ -12,15 +12,16 @@ use { solana_connection_cache::client_connection::ClientConnection, solana_cost_model::cost_model::CostModel, solana_fee_structure::{FeeBudgetLimits, FeeDetails}, - 
solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol}, + solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol, node::NodeMultihoming}, solana_keypair::Keypair, + solana_net_utils::multihomed_sockets::BindIpAddrs, solana_packet as packet, solana_perf::data_budget::DataBudget, solana_poh::poh_recorder::PohRecorder, solana_quic_definitions::NotifyKeyUpdate, solana_runtime::{ bank::{Bank, CollectorFeeDetails}, - bank_forks::SharableBank, + bank_forks::SharableBanks, }, solana_runtime_transaction::{ runtime_transaction::RuntimeTransaction, transaction_meta::StaticMeta, @@ -58,7 +59,15 @@ mod packet_container; /// * [`TpuClientNextClient`]: Relies on the `tpu-client-next` crate. pub enum ForwardingClientOption<'a> { ConnectionCache(Arc), - TpuClientNext((&'a Keypair, UdpSocket, RuntimeHandle, CancellationToken)), + TpuClientNext( + ( + &'a Keypair, + Box<[UdpSocket]>, + RuntimeHandle, + CancellationToken, + Arc, + ), + ), } /// Value chosen because it was used historically, at some point @@ -125,7 +134,7 @@ pub(crate) fn spawn_forwarding_stage( receiver: Receiver<(BankingPacketBatch, bool)>, client: ForwardingClientOption<'_>, vote_client_udp_socket: UdpSocket, - root_bank: SharableBank, + sharable_banks: SharableBanks, forward_address_getter: ForwardAddressGetter, data_budget: DataBudget, ) -> SpawnForwardingStageResult { @@ -137,9 +146,10 @@ pub(crate) fn spawn_forwarding_stage( let forwarding_stage = ForwardingStage::new( receiver, vote_client, - non_vote_client.clone(), - root_bank, + Box::new([non_vote_client]), + sharable_banks, data_budget, + None, ); SpawnForwardingStageResult { join_handle: Builder::new() @@ -151,43 +161,63 @@ pub(crate) fn spawn_forwarding_stage( } ForwardingClientOption::TpuClientNext(( stake_identity, - tpu_client_socket, + tpu_client_sockets, runtime_handle, cancel, + node_multihoming, )) => { - let non_vote_client = TpuClientNextClient::new( - runtime_handle, - forward_address_getter, - Some(stake_identity), - 
tpu_client_socket, - cancel, - ); + // Create TPU clients for each socket provided. + // Number of clients is same as number of bind IP addresses. + let non_vote_clients: Box<[TpuClientNextClient]> = tpu_client_sockets + .into_vec() + .into_iter() + .map(|socket| { + TpuClientNextClient::new( + runtime_handle.clone(), + forward_address_getter.clone(), + Some(stake_identity), + socket, + cancel.clone(), + ) + }) + .collect(); let forwarding_stage = ForwardingStage::new( receiver, vote_client, - non_vote_client.clone(), - root_bank, + non_vote_clients.clone(), + sharable_banks, data_budget, + Some(node_multihoming.bind_ip_addrs.clone()), ); SpawnForwardingStageResult { join_handle: Builder::new() .name("solFwdStage".to_string()) .spawn(move || forwarding_stage.run()) .unwrap(), - client_updater: Arc::new(non_vote_client) as Arc, + client_updater: Arc::new(UpdateHandles(non_vote_clients)) + as Arc, } } } } +/// Local struct to be able to update keys on all clients at once +struct UpdateHandles(Box<[TpuClientNextClient]>); +impl NotifyKeyUpdate for UpdateHandles { + fn update_key(&self, key: &Keypair) -> Result<(), Box> { + self.0.iter().try_for_each(|client| client.update_key(key)) + } +} + struct ForwardingStage { receiver: Receiver<(BankingPacketBatch, bool)>, packet_container: PacketContainer, - root_bank: SharableBank, + sharable_banks: SharableBanks, vote_client: VoteClient, - non_vote_client: NonVoteClient, + non_vote_clients: Box<[NonVoteClient]>, data_budget: DataBudget, metrics: ForwardingStageMetrics, + bind_ip_addrs: Option>, } impl @@ -196,25 +226,27 @@ impl fn new( receiver: Receiver<(BankingPacketBatch, bool)>, vote_client: VoteClient, - non_vote_client: NonVoteClient, - root_bank: SharableBank, + non_vote_clients: Box<[NonVoteClient]>, + sharable_banks: SharableBanks, data_budget: DataBudget, + bind_ip_addrs: Option>, ) -> Self { Self { receiver, packet_container: PacketContainer::with_capacity(4 * 4096), - root_bank, - non_vote_client, + 
sharable_banks, + non_vote_clients, vote_client, data_budget, metrics: ForwardingStageMetrics::default(), + bind_ip_addrs, } } /// Runs `ForwardingStage`'s main loop, to receive, order, and forward packets. fn run(mut self) { loop { - let root_bank = self.root_bank.load(); + let root_bank = self.sharable_banks.root(); if !self.receive_and_buffer(&root_bank) { break; } @@ -236,25 +268,12 @@ impl self.buffer_packet_batches(packet_batches, tpu_vote_batch, bank); // Drain the channel up to timeout - let timed_out = loop { - if now.elapsed() >= TIMEOUT { - break true; - } + while now.elapsed() < TIMEOUT { match self.receiver.try_recv() { Ok((packet_batches, tpu_vote_batch)) => { self.buffer_packet_batches(packet_batches, tpu_vote_batch, bank) } - Err(_) => break false, - } - }; - - // If timeout was reached, prevent backup by draining all - // packets in the channel. - if timed_out { - warn!("ForwardingStage is backed up, dropping packets"); - while let Ok((packet_batch, _)) = self.receiver.try_recv() { - self.metrics.dropped_on_timeout += - packet_batch.iter().map(|b| b.len()).sum::(); + Err(_) => break, } } @@ -344,6 +363,16 @@ impl let mut non_vote_batch = Vec::with_capacity(FORWARD_BATCH_SIZE); let mut vote_batch = Vec::with_capacity(FORWARD_BATCH_SIZE); + // determine the client to use for next batch based on current active interface + // use primary interface bind (index 0) if not in multihoming context. + let active_non_vote_client = { + let active_index = self + .bind_ip_addrs + .as_ref() + .map(|binds| binds.active_index()) + .unwrap_or(0); + &self.non_vote_clients[active_index] + }; // Loop through packets creating batches of packets to forward. while let Some(packet) = self.packet_container.pop_max() { // If it exceeds our data-budget, drop. 
@@ -369,7 +398,7 @@ impl non_vote_batch.push(packet_data_vec); send_batch_if_full( &mut non_vote_batch, - &self.non_vote_client, + active_non_vote_client, &mut self.metrics.non_votes_forwarded, &mut self.metrics.non_votes_dropped_on_send, ); @@ -391,8 +420,7 @@ impl if !non_vote_batch.is_empty() { let num_non_votes = non_vote_batch.len(); self.metrics.non_votes_forwarded += num_non_votes; - if self - .non_vote_client + if active_non_vote_client .send_transactions_in_batch(non_vote_batch) .is_err() { @@ -722,8 +750,6 @@ struct ForwardingStageMetrics { non_votes_dropped_on_data_budget: usize, non_votes_forwarded: usize, non_votes_dropped_on_send: usize, - - dropped_on_timeout: usize, } impl ForwardingStageMetrics { @@ -803,7 +829,6 @@ impl Default for ForwardingStageMetrics { non_votes_dropped_on_data_budget: 0, non_votes_forwarded: 0, non_votes_dropped_on_send: 0, - dropped_on_timeout: 0, } } } @@ -898,15 +923,16 @@ mod tests { let (_bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&create_genesis_config(1).genesis_config); - let root_bank = bank_forks.read().unwrap().sharable_root_bank(); + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); let vote_mock_client = MockClient::new(); let non_vote_mock_client = MockClient::new(); let mut forwarding_stage = ForwardingStage::new( packet_batch_receiver, vote_mock_client.clone(), - non_vote_mock_client.clone(), - root_bank, + Box::new([non_vote_mock_client.clone()]), + sharable_banks, DataBudget::default(), + None, ); // Send packet batches. 
@@ -940,7 +966,7 @@ mod tests { .send((vote_packets.clone(), true)) .unwrap(); - let bank = forwarding_stage.root_bank.load(); + let bank = forwarding_stage.sharable_banks.root(); forwarding_stage.receive_and_buffer(&bank); if !packet_batch_sender.is_empty() { forwarding_stage.receive_and_buffer(&bank); diff --git a/core/src/lib.rs b/core/src/lib.rs index 384c7ad68b4273..55460ca7cf216f 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -22,6 +22,7 @@ pub mod drop_bank_service; pub mod fetch_stage; pub mod forwarding_stage; pub mod gen_keys; +mod mock_alpenglow_consensus; pub mod next_leader; pub mod optimistic_confirmation_verifier; pub mod repair; diff --git a/core/src/mock_alpenglow_consensus.rs b/core/src/mock_alpenglow_consensus.rs new file mode 100644 index 00000000000000..f8a0ebdc732a5d --- /dev/null +++ b/core/src/mock_alpenglow_consensus.rs @@ -0,0 +1,1074 @@ +#![allow(clippy::arithmetic_side_effects)] +use { + crate::consensus::Stake, + bytemuck::{Pod, Zeroable}, + crossbeam_channel::{bounded, Receiver, Sender}, + serde::de::DeserializeOwned, + solana_clock::{Slot, DEFAULT_MS_PER_SLOT}, + solana_gossip::{cluster_info::ClusterInfo, epoch_specs::EpochSpecs}, + solana_keypair::Keypair, + solana_packet::{Meta, Packet}, + solana_pubkey::{Pubkey, PUBKEY_BYTES}, + solana_runtime::bank::Bank, + solana_signature::SIGNATURE_BYTES, + solana_signer::Signer, + solana_streamer::{recvmmsg::recv_mmsg, sendmmsg::batch_send}, + std::{ + collections::HashMap, + iter::once, + net::{SocketAddr, UdpSocket}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, + }, + thread::{self, JoinHandle}, + time::{Duration, Instant}, + }, +}; + +// This is a mockup of optimistic Alpenglow voting (no skips) +// Each mock voting round takes 3 slots to complete: +// 1. prep & enable reception +// 2. initiate voting by sending Notarize, +// 3. 
closing vote window and finalizing stats +// +// This is done to ensure we can capture votes coming earlier or later than +// our own slot start/end times. +// we run VOTES_IN_A_ROW voting rounds every N slots, where N is controlled on-chain. + +/// number of voting rounds +const NUM_VOTE_ROUNDS: Slot = 4; + +/// rough upper bound of number of testnet validators to avoid allocations +const NUM_TESTNET_VALIDATORS: usize = 1024 * 3; + +/// This is a placeholder that is only used for load-testing. +/// This is not representative of the actual alpenglow implementation. +pub(crate) struct MockAlpenglowConsensus { + sender_thread: JoinHandle<()>, // thread that sends packets + listener_thread: JoinHandle<()>, // thread that listens for votes and updates statemachine + runner_thread: JoinHandle<()>, // thread that signals others to perform voting tasks + state: Arc, // internal state of the test for each round + highest_slot: Slot, // highest slot we have observed so far + should_exit: Arc, + // external state + epoch_specs: EpochSpecs, + cluster_info: Arc, + // control of internal threadpool that handles test timings + slot_sender: Option>, +} + +/// Information we hold for individual peers in the test +struct PeerData { + stake: Stake, + address: SocketAddr, + relative_time_of_arrival: [Option; NUM_VOTOR_TYPES], +} + +/// State machine internal state for the mock alpenglow +/// This roughly approximates the actual certificate pool behavior +#[derive(Default, Debug)] +struct AgStateMachine { + block_notarized: bool, + block_finalized: bool, + notarize_stake_collected: Stake, + finalize_stake_collected: Stake, +} + +/// This holds the state for sender and listener threads +/// of the mock alpenglow behind a mutex. 
Contention on this +/// should be low since there is only 2 threads and one of them +/// only ever does anything exactly 3 times per slot for ~1ms each +struct SharedState { + current_slot_start: Instant, + peers: HashMap, + total_staked: Stake, + current_slot: Slot, + alpenglow_state: AgStateMachine, +} + +type StateArray = [Mutex; NUM_VOTE_ROUNDS as usize]; + +impl SharedState { + fn reset(&mut self) -> HashMap { + let mut peers = HashMap::with_capacity(NUM_TESTNET_VALIDATORS); + std::mem::swap(&mut peers, &mut self.peers); + self.current_slot = 0; + self.total_staked = 0; + self.alpenglow_state = AgStateMachine::default(); + peers + } + + fn new(current_slot: Slot) -> Self { + Self { + current_slot_start: Instant::now(), + peers: HashMap::with_capacity(NUM_TESTNET_VALIDATORS), + current_slot, + total_staked: 0, + alpenglow_state: AgStateMachine::default(), + } + } + + fn available(&self) -> bool { + self.current_slot == 0 + } + + fn is_ready_for_slot(&self, slot: Slot) -> bool { + self.current_slot == slot + } +} + +const ONE_SLOT: Duration = Duration::from_millis(DEFAULT_MS_PER_SLOT); + +fn get_state_for_slot_index(states: &StateArray, slot: Slot) -> &Mutex { + &states[(slot % NUM_VOTE_ROUNDS) as usize] +} + +/// This is just for test, and does not represent actual alpenglow +#[derive(Copy, Clone, Debug)] +#[repr(u64)] +enum VotorMessageType { + Notarize, + // we can glue these since this mock does not implement skips + NotarizeCertificateAndFinalize, + FinalizeCertificate, + // Update NUM_VOTOR_TYPES if changing this +} + +impl TryFrom for VotorMessageType { + type Error = (); + + fn try_from(value: u64) -> Result { + match value { + 0 => Ok(Self::Notarize), + 1 => Ok(Self::NotarizeCertificateAndFinalize), + 2 => Ok(Self::FinalizeCertificate), + _ => Err(()), + } + } +} +const NUM_VOTOR_TYPES: usize = 3; + +/// Header of the mock vote packet. +/// Actual frames on the wire may be longer as +/// configured by the sender. Only the header is signed. 
+#[repr(C)] +#[derive(Copy, Clone, Debug, Pod, Zeroable)] +struct MockVotePacketHeader { + signature: [u8; SIGNATURE_BYTES], + sender: [u8; PUBKEY_BYTES], + slot_number: Slot, + state: u64, +} + +const MOCK_VOTE_HEADER_SIZE: usize = std::mem::size_of::(); + +/// The actual alpenglow votor packets are all smaller than this, +/// but this is deliberately overtuned to model the worst case. +const MOCK_VOTE_PACKET_SIZE: usize = 512; + +impl MockVotePacketHeader { + fn from_bytes_mut(buf: &mut [u8]) -> &mut Self { + bytemuck::from_bytes_mut::(&mut buf[..MOCK_VOTE_HEADER_SIZE]) + } + fn from_bytes(buf: &[u8]) -> &Self { + bytemuck::from_bytes::(&buf[..MOCK_VOTE_HEADER_SIZE]) + } +} + +/// Max number of slots we can be ahead of the root bank +const MAX_TOWER_HEIGHT: Slot = 32 + 100; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum SendCommand { + Notarize(Slot), + NotarizeCertificateAndFinalize(Slot), + FinalizeCertificate(Slot), +} + +impl MockAlpenglowConsensus { + pub(crate) fn new( + alpenglow_socket: UdpSocket, + cluster_info: Arc, + epoch_specs: EpochSpecs, + ) -> Self { + info!("Mock Alpenglow consensus is enabled"); + let socket = Arc::new(alpenglow_socket); + let (command_sender, vote_command_receiver) = bounded(4); + let shared_state = Arc::new(std::array::from_fn(|_| Mutex::new(SharedState::new(0)))); + let should_exit = Arc::new(AtomicBool::new(false)); + + let (slot_sender, slot_receiver) = bounded(1); + let runner_thread = { + let slot_receiver = slot_receiver.clone(); + let command_sender = command_sender.clone(); + let state = shared_state.clone(); + thread::spawn(move || { + Self::runner(slot_receiver, command_sender, state); + }) + }; + + Self { + state: shared_state.clone(), + listener_thread: thread::spawn({ + let shared_state = shared_state.clone(); + let should_exit = should_exit.clone(); + let socket = socket.clone(); + let my_id = cluster_info.id(); + move || { + Self::listener_thread(shared_state, should_exit, my_id, socket, 
command_sender) + } + }), + sender_thread: thread::spawn({ + let cluster_info = cluster_info.clone(); + move || { + Self::sender_thread( + shared_state, + cluster_info, + socket.clone(), + vote_command_receiver, + ) + } + }), + runner_thread, + should_exit, + epoch_specs, + cluster_info, + highest_slot: 0, + slot_sender: Some(slot_sender), + } + } + + /// prepare to receive votes for the slot indicated + /// This should be called in advance + /// in case we are really late getting shreds + fn prepare_to_receive(&mut self, slot: Slot, slot_start: Instant) -> Result<(), Slot> { + trace!( + "{}: preparing to receive for slot {slot}", + self.cluster_info.id() + ); + let staked_nodes = self.epoch_specs.current_epoch_staked_nodes(); + + let mut state = get_state_for_slot_index(&self.state, slot).lock().unwrap(); + if !state.available() { + return Err(state.current_slot); + } + state.current_slot = slot; + state.current_slot_start = slot_start; + for (peer, &stake) in staked_nodes.iter() { + let Some(ag_addr) = self + .cluster_info + .lookup_contact_info(peer, |ci| ci.alpenglow()) + .flatten() + else { + continue; + }; + state.peers.insert( + *peer, + PeerData { + stake, + address: ag_addr, + relative_time_of_arrival: [None; NUM_VOTOR_TYPES], + }, + ); + state.total_staked += stake; + } + trace!( + "Prepared for slot {slot}, total stake is {}", + state.total_staked + ); + Ok(()) + } + + /// Collects votes and changes states + fn listener_thread( + self_state: Arc, + should_exit: Arc, + my_id: Pubkey, + socket: Arc, + command_sender: Sender, + ) { + socket + .set_read_timeout(Some(Duration::from_secs(1))) + .unwrap(); + trace!("Listener thread started"); + // Set aside enough space to fetch multiple packets from the kernel per syscall + let mut packets: Vec = vec![Packet::default(); 1024]; + loop { + // must wipe all Meta records to reuse the buffer + for p in packets.iter_mut() { + *p.meta_mut() = Meta::default(); + } + + if should_exit.load(Ordering::Relaxed) { + return; 
+ } + // recv_mmsg should timeout in 1 second + let n = match recv_mmsg(&socket, &mut packets) { + // we may have received no packets, in this case we can safely skip the rest + Ok(0) => continue, + Ok(n) => n, + Err(e) => { + match e.kind() { + std::io::ErrorKind::TimedOut | std::io::ErrorKind::WouldBlock => { + 0 // no packets received + } + _ => { + error!( + "Got error {:?} in mock alpenglow RX socket operation, exiting \ + thread", + e.raw_os_error() + ); + return; + } + } + } + }; + + for pkt in packets.iter().take(n) { + if pkt.meta().size < MOCK_VOTE_HEADER_SIZE { + trace!("Packet too small {}", pkt.meta().size); + continue; + } + let sender = SocketAddr::new(pkt.meta().addr, pkt.meta().port); + let Some(pkt_buf) = pkt.data(..) else { + continue; + }; + let vote_pkt = MockVotePacketHeader::from_bytes(pkt_buf); + let pk = Pubkey::new_from_array(vote_pkt.sender); + if vote_pkt.signature != SIGNATURE { + trace!("Sigverify failed"); + continue; + } + + let mut state = get_state_for_slot_index(&self_state, vote_pkt.slot_number) + .lock() + .unwrap(); + + if !state.is_ready_for_slot(vote_pkt.slot_number) { + trace!( + "Packet does not have matching slot number {} != {}", + vote_pkt.slot_number, + state.current_slot + ); + continue; + } + + let elapsed = state.current_slot_start.elapsed(); + + let stake_60_percent = (state.total_staked as f64 * 0.6) as Stake; + let stake_80_percent = (state.total_staked as f64 * 0.8) as Stake; + let Ok(votor_msg) = VotorMessageType::try_from(vote_pkt.state) else { + continue; + }; + let Some(peer_info) = state.peers.get_mut(&pk) else { + continue; + }; + if sender != peer_info.address { + // check if sender socket matches + continue; + } + trace!( + "RX slot {}: {:?} from {}", + vote_pkt.slot_number, + votor_msg, + pk + ); + let toa = &mut peer_info.relative_time_of_arrival[votor_msg as u64 as usize]; + if toa.is_none() { + *toa = Some(elapsed); + } else { + // duplicate packet received, ignore it + trace!("Duplicate packet"); + 
continue; + } + // keep borrow checker happy + let stake = peer_info.stake; + match votor_msg { + VotorMessageType::Notarize => { + state.alpenglow_state.notarize_stake_collected += stake; + trace!( + "{my_id}:{} of {} Notarize stake collected", + state.alpenglow_state.notarize_stake_collected, + stake_60_percent + ); + if !state.alpenglow_state.block_notarized + && state.alpenglow_state.notarize_stake_collected >= stake_60_percent + { + state.alpenglow_state.block_notarized = true; + trace!( + "{my_id} has notarized slot {} by observing 60% of notar votes", + state.current_slot + ); + let _ = command_sender.try_send( + SendCommand::NotarizeCertificateAndFinalize(state.current_slot), + ); + } + if !state.alpenglow_state.block_finalized + && state.alpenglow_state.notarize_stake_collected >= stake_80_percent + { + state.alpenglow_state.block_finalized = true; + trace!( + "{my_id} has finalized slot {} by observing 80% of notar votes", + state.current_slot + ); + let _ = command_sender + .try_send(SendCommand::FinalizeCertificate(state.current_slot)); + } + } + VotorMessageType::NotarizeCertificateAndFinalize => { + if !state.alpenglow_state.block_notarized { + state.alpenglow_state.block_notarized = true; + trace!( + "{my_id} has notarized slot {} by observing notar certificate", + state.current_slot + ); + let _ = command_sender.try_send( + SendCommand::NotarizeCertificateAndFinalize(state.current_slot), + ); + } + state.alpenglow_state.finalize_stake_collected += stake; + trace!( + "{my_id}:{} of {} Finalize stake collected", + state.alpenglow_state.finalize_stake_collected, + stake_60_percent + ); + if !state.alpenglow_state.block_finalized + && state.alpenglow_state.finalize_stake_collected >= stake_60_percent + { + state.alpenglow_state.block_finalized = true; + trace!( + "{my_id} has finalized slot {} by observing finalize votes", + state.current_slot + ); + let _ = command_sender + .try_send(SendCommand::FinalizeCertificate(state.current_slot)); + } + } + 
VotorMessageType::FinalizeCertificate => { + if !state.alpenglow_state.block_finalized { + state.alpenglow_state.block_finalized = true; + trace!( + "{my_id} has finalized slot {} by observing finalize certificate", + state.current_slot + ); + let _ = command_sender + .try_send(SendCommand::FinalizeCertificate(state.current_slot)); + } + } + } + } + } + } + + /// Sends mock packets to everyone in the cluster + fn sender_thread( + state: Arc, + cluster_info: Arc, + socket: Arc, + command: Receiver, + ) { + let mut packet_buf = vec![0u8; MOCK_VOTE_PACKET_SIZE]; + let id = cluster_info.id(); + for command in command.iter() { + let (slot, votor_msg) = match command { + SendCommand::Notarize(slot) => (slot, VotorMessageType::Notarize), + SendCommand::NotarizeCertificateAndFinalize(slot) => { + (slot, VotorMessageType::NotarizeCertificateAndFinalize) + } + SendCommand::FinalizeCertificate(slot) => { + (slot, VotorMessageType::FinalizeCertificate) + } + }; + + prep_and_sign_packet( + &mut packet_buf, + slot, + votor_msg, + cluster_info.keypair().as_ref(), + ); + + // prepare addresses to send the packets + let mut send_instructions = Vec::with_capacity(NUM_TESTNET_VALIDATORS); // we have ~2500 validators in testnet + { + let state = get_state_for_slot_index(&state, slot).lock().unwrap(); + // check if our task was aborted, avoid sending if it was. 
+ if !state.is_ready_for_slot(slot) { + return; + } + + for (peer, info) in state.peers.iter() { + send_instructions.push((&packet_buf, info.address)); + trace!( + "{id}: send {votor_msg:?} for slot {slot} to {} for {peer}", + info.address + ); + } + } + // broadcast to everybody at once + let _ = batch_send(&socket, send_instructions); + } + } + + fn check_conditions_to_vote(&mut self, slot: Slot, root_bank: &Bank) -> bool { + // ensure we do not start process for a slot which is "in the past" + if slot <= self.highest_slot { + trace!( + "Skipping AG logic for slot {slot}, current highest slot is {}", + self.highest_slot + ); + return false; + } + self.highest_slot = slot; + + let Some(config) = get_test_config_from_account::(root_bank) else { + trace!( + "Skipping AG logic for slot {slot}, onchain config is not available {}", + self.highest_slot + ); + return false; // no config is available => test can not run + }; + let interval = config.test_interval_slots as u64; + if interval <= NUM_VOTE_ROUNDS + 1 { + trace!("Alpenglow voting is disabled",); + return false; + } + + let root_slot = root_bank.slot(); + // If we fall too far behind and can not root banks, engage safety latch to stop the test + // and keep it stopped no matter what the config says + if root_slot + MAX_TOWER_HEIGHT < slot { + error!( + "root slot ({root_slot}) is too far behind vote slot ({slot}), test will not run", + ); + return false; + } + + slot % interval == 0 + } + + pub(crate) fn signal_new_slot(&mut self, slot: Slot, root_bank: &Bank) { + if !self.check_conditions_to_vote(slot, root_bank) { + return; + } + { + let mut slot_start = Instant::now(); + for s in slot..slot + NUM_VOTE_ROUNDS { + if self.prepare_to_receive(s, slot_start).is_err() { + error!("Can not initiate mock voting, slot {s} was not released"); + datapoint_info!("mock_alpenglow", ("runner_stuck", 2, i64), ("slot", s, i64)); + } + slot_start += ONE_SLOT; + } + } + + if let Some(slot_sender) = self.slot_sender.as_ref() { + 
if slot_sender.try_send(slot).is_err() { + error!("Can not initiate mock voting, worker is busy"); + datapoint_info!( + "mock_alpenglow", + ("runner_stuck", 1, i64), + ("slot", slot, i64) + ); + } + } + } + + /// Runs test for 4 slots in a row when new slot index + /// is sent over slot_receiver channel + fn runner( + slot_receiver: Receiver, + command_sender: Sender, + state: Arc, + ) { + for start_slot in slot_receiver.iter() { + let slot_range = start_slot..(start_slot + NUM_VOTE_ROUNDS); + let vote_slots = slot_range.clone().map(Some).chain(once(None)); + let report_slots = once(None).chain(slot_range.map(Some)); + for (vote_slot, report_slot) in vote_slots.zip(report_slots) { + // we get activated 1 slot in advance to capture votes coming + // earlier than we have finished replay + std::thread::sleep(ONE_SLOT); + if let Some(slot) = vote_slot { + trace!("Starting voting in slot {slot}"); + let _ = command_sender.send(SendCommand::Notarize(slot)); + } + if let Some(slot) = report_slot { + // collect stats from the previous slot's voting + let (peers, total_staked) = { + let mut state_for_slot_index = + get_state_for_slot_index(&state, slot).lock().unwrap(); + let state_slot = state_for_slot_index.current_slot; + let total_staked = state_for_slot_index.total_staked; + let peers = state_for_slot_index.reset(); + // check if state is for correct slot to not report garbage + if state_slot != slot { + continue; + } + (peers, total_staked) + }; + report_collected_votes(peers, total_staked, slot); + } + } + } + } + + pub(crate) fn join(mut self) -> thread::Result<()> { + self.should_exit.store(true, Ordering::Relaxed); + drop(self.slot_sender.take()); // drop slot_sender to cause runners to terminate + self.listener_thread.join()?; // this exits because of the should_exit flag we have set + self.runner_thread.join()?; // this exits because slot_sender is dropped + self.sender_thread.join() + } +} + +fn prep_and_sign_packet( + packet_buf: &mut [u8], + slot: Slot, + 
state: VotorMessageType, + keypair: &Keypair, +) { + // prepare the packet to send and sign it + { + let pkt = MockVotePacketHeader::from_bytes_mut(packet_buf); + pkt.slot_number = slot; + pkt.sender = *keypair.pubkey().as_array(); + pkt.signature = [0; SIGNATURE_BYTES]; + pkt.state = state as u64; + } + { + let pkt = MockVotePacketHeader::from_bytes_mut(packet_buf); + pkt.signature = SIGNATURE; + } +} + +const SIGNATURE: [u8; SIGNATURE_BYTES] = [7u8; SIGNATURE_BYTES]; + +fn report_collected_votes(peers: HashMap, total_staked: Stake, slot: Slot) { + trace!("Reporting statistics for slot {slot}"); + let (total_voted_nodes, stake_weighted_delay, percent_collected) = + compute_stake_weighted_means(&peers, total_staked); + datapoint_info!( + "mock_alpenglow", + ("total_peers", peers.len(), f64), + ("slot", slot, i64), + ("packets_collected_notarize", total_voted_nodes[0], f64), + ( + "percent_stake_collected_notarize", + percent_collected[0], + f64 + ), + ("weighted_delay_ms_notarize", stake_weighted_delay[0], f64), + ("packets_collected_notarize_cert", total_voted_nodes[1], f64), + ( + "percent_stake_collected_notarize_cert", + percent_collected[1], + f64 + ), + ( + "weighted_delay_ms_notarize_cert", + stake_weighted_delay[1], + f64 + ), + ("packets_collected_finalize_cert", total_voted_nodes[2], f64), + ( + "percent_stake_collected_finalize_cert", + percent_collected[2], + f64 + ), + ( + "weighted_delay_ms_finalize_cert", + stake_weighted_delay[2], + f64 + ), + ); +} + +/// Computes the vote transmission KPIs for a given slot split +/// out by votor message type. 
These returned KPIs are: +/// (total messages received, stake-weighted vote delays, +/// percent of stake we received a message from) +fn compute_stake_weighted_means( + peers: &HashMap, + total_staked: u64, +) -> ( + [usize; NUM_VOTOR_TYPES], + [f64; NUM_VOTOR_TYPES], + [f64; NUM_VOTOR_TYPES], +) { + let mut total_voted_stake: [Stake; NUM_VOTOR_TYPES] = [0; NUM_VOTOR_TYPES]; + let mut total_voted_nodes: [usize; NUM_VOTOR_TYPES] = [0; NUM_VOTOR_TYPES]; + let mut total_delay_ms = [0u128; NUM_VOTOR_TYPES]; + for (_pubkey, peer_data) in peers.iter() { + for i in 0..NUM_VOTOR_TYPES { + let Some(rel_toa) = peer_data.relative_time_of_arrival[i] else { + continue; + }; + total_voted_stake[i] += peer_data.stake; + total_voted_nodes[i] += 1; + // clamping the actual observed ToA to 800 ms to prevent outliers from + // skewing the dataset too much. + total_delay_ms[i] += rel_toa.as_millis().clamp(0, 800) * peer_data.stake as u128; + } + } + + let mut stake_weighted_delay = [0f64; NUM_VOTOR_TYPES]; + let mut percent_collected = [0f64; NUM_VOTOR_TYPES]; + + for i in 0..NUM_VOTOR_TYPES { + if total_voted_stake[i] > 0 { + stake_weighted_delay[i] = total_delay_ms[i] as f64 / total_voted_stake[i] as f64; + } + percent_collected[i] = 100.0 * total_voted_stake[i] as f64 / total_staked as f64; + + info!( + "{:?}: got {} % of total stake collected, stake-weighted delay is {}ms", + VotorMessageType::try_from(i as u64).unwrap(), // this unwrap is ok since i is in static range + percent_collected[i], + stake_weighted_delay[i] + ); + } + (total_voted_nodes, stake_weighted_delay, percent_collected) +} + +// Pubkey for the account that is used to control the test via on-chain state +mod control_pubkey { + solana_pubkey::declare_id!("9PsiyXopc2M9DMEmsEeafNHHHAUmPKe9mHYgrk6fHPyx"); +} + +/// Actual on-chain state that controls the mock alpenglow test +#[derive(Serialize, Deserialize, Debug, Default)] // Serialize is needed for tests only +#[repr(C)] +pub(crate) struct TestConfig { + _version: 
u8, // This is part of Record program header + _authority: [u8; 32], // This is part of Record program header + test_interval_slots: u16, // 0 here means test is disabled + _packet_size: u16, + _future_use: [u8; 16], +} + +///Parse an account's content, keeping it in cache. +fn get_test_config_from_account(bank: &Bank) -> Option { + let data = bank + .accounts() + .accounts_db + .load_account_with(&bank.ancestors, &control_pubkey::ID, true)? + .0; + data.deserialize_data().ok() +} + +#[cfg(test)] +mod tests { + use { + crate::mock_alpenglow_consensus::{ + compute_stake_weighted_means, get_state_for_slot_index, prep_and_sign_packet, + MockAlpenglowConsensus, PeerData, SendCommand, SharedState, StateArray, TestConfig, + VotorMessageType, MOCK_VOTE_HEADER_SIZE, MOCK_VOTE_PACKET_SIZE, NUM_VOTOR_TYPES, + }, + crossbeam_channel::bounded, + solana_clock::Slot, + solana_keypair::Keypair, + solana_net_utils::sockets::bind_to_localhost_unique, + solana_pubkey::Pubkey, + solana_signer::Signer, + std::{ + collections::HashMap, + net::UdpSocket, + sync::{atomic::AtomicBool, Arc, Mutex}, + thread::sleep, + time::{Duration, Instant}, + }, + }; + + #[test] + fn test_record_size() { + assert_eq!( + bincode::serialized_size(&TestConfig::default()).unwrap(), + 53 + ); + } + + #[test] + fn test_mock_alpenglow_statemachine() { + let test_timeout = Duration::from_secs(3); + let max_slots = 5; + solana_logger::setup_with("trace"); + let num_nodes = 10; + let keypairs: Vec = (0..num_nodes).map(|_| Keypair::new()).collect(); + let peers: Vec<(Pubkey, UdpSocket)> = keypairs + .iter() + .map(|kp| (kp.pubkey(), bind_to_localhost_unique().unwrap())) + .collect(); + + let socket = Arc::new(peers[0].1.try_clone().unwrap()); + let my_id = keypairs[0].pubkey(); + let (command_sender, vote_command_receiver) = bounded(4); + let shared_state = Arc::new(std::array::from_fn(|_| Mutex::new(SharedState::new(0)))); + let should_exit = Arc::new(AtomicBool::new(false)); + + let mut packet_tx_buf = [0u8; 
MOCK_VOTE_PACKET_SIZE]; + std::thread::scope(|scope| { + scope.spawn(|| { + MockAlpenglowConsensus::listener_thread( + shared_state.clone(), + should_exit.clone(), + my_id, + socket, + command_sender, + ) + }); + //make sure test terminates listener thread even if we panic + scope.spawn(|| { + for _ in 0..max_slots { + if should_exit.load(std::sync::atomic::Ordering::Relaxed) { + break; + } + sleep(test_timeout); + } + should_exit.store(true, std::sync::atomic::Ordering::Relaxed); + }); + + let slot = 1; // fast finalize + debug!("Slot {slot} starting"); + let peers_map = make_peer_map(peers.as_slice()); + mock_prep_rx(&shared_state, slot, peers_map); + // make sure initial state is correct + { + let slot_state = get_state_for_slot_index(&shared_state, slot) + .lock() + .unwrap(); + assert_eq!(slot_state.alpenglow_state.notarize_stake_collected, 0); + assert!(!slot_state.alpenglow_state.block_notarized); + assert!(!slot_state.alpenglow_state.block_finalized); + } + + sleep(Duration::from_millis(1)); + // make sure we produce NotarizeCert when getting 60% of stake + for p in 1..=6 { + send_packet( + p, + VotorMessageType::Notarize, + slot, + &keypairs, + &peers, + &mut packet_tx_buf, + ); + } + + // wait for the broadcasts + let cmd = vote_command_receiver.recv_timeout(test_timeout).unwrap(); + assert_eq!(cmd, SendCommand::NotarizeCertificateAndFinalize(slot)); + { + let slot_state = get_state_for_slot_index(&shared_state, slot) + .lock() + .unwrap(); + let peerdata = slot_state.peers.get(&peers[1].0).unwrap(); + assert!(peerdata.relative_time_of_arrival[0].unwrap().as_millis() > 0); + assert!(peerdata.relative_time_of_arrival[0].unwrap() < test_timeout); + assert!(peerdata.relative_time_of_arrival[1].is_none()); + assert!(peerdata.relative_time_of_arrival[2].is_none()); + assert_eq!(slot_state.alpenglow_state.notarize_stake_collected, 6); + assert!(slot_state.alpenglow_state.block_notarized); + assert!(!slot_state.alpenglow_state.block_finalized); + } + 
sleep(Duration::from_millis(1)); + // make sure we produce FinalizeCert when getting 60% of stake sending Finalize + for p in 1..=6 { + send_packet( + p, + VotorMessageType::NotarizeCertificateAndFinalize, + slot, + &keypairs, + &peers, + &mut packet_tx_buf, + ); + } + // wait for the broadcast + let cmd = vote_command_receiver.recv_timeout(test_timeout).unwrap(); + assert_eq!(cmd, SendCommand::FinalizeCertificate(slot)); + { + let slot_state = get_state_for_slot_index(&shared_state, slot) + .lock() + .unwrap(); + let peerdata = slot_state.peers.get(&peers[1].0).unwrap(); + assert!(peerdata.relative_time_of_arrival[1].unwrap().as_millis() > 0); + assert!(peerdata.relative_time_of_arrival[1].unwrap() < test_timeout); + assert!(peerdata.relative_time_of_arrival[2].is_none()); + assert_eq!(slot_state.alpenglow_state.finalize_stake_collected, 6); + assert!(slot_state.alpenglow_state.block_finalized); + let (total_voted_nodes, stake_weighted_delay, _percent_collected) = + compute_stake_weighted_means(&slot_state.peers, peers.len() as u64); + assert_eq!(total_voted_nodes[0], 6); + assert_eq!(total_voted_nodes[1], 6); + assert!(stake_weighted_delay[0] < stake_weighted_delay[1]); + assert_eq!(stake_weighted_delay[2], 0.0); + } + // new slot new pattern (slow finalize) + let slot = slot + 1; + debug!("Slot {slot} starting"); + let peers_map = make_peer_map(peers.as_slice()); + mock_prep_rx(&shared_state, slot, peers_map); + + // make sure we do not NotarizeCert when getting Notar votes + for p in 1..=5 { + send_packet( + p, + VotorMessageType::Notarize, + slot, + &keypairs, + &peers, + &mut packet_tx_buf, + ); + } + sleep(Duration::from_millis(1)); + { + let slot_state = get_state_for_slot_index(&shared_state, slot) + .lock() + .unwrap(); + assert!(!slot_state.alpenglow_state.block_notarized); + assert!(!slot_state.alpenglow_state.block_finalized); + } + + // now we get a couple of notarize certificates + for p in 3..=5 { + send_packet( + p, + 
VotorMessageType::NotarizeCertificateAndFinalize, + slot, + &keypairs, + &peers, + &mut packet_tx_buf, + ); + } + + // wait for the broadcasts + let cmd = vote_command_receiver.recv_timeout(test_timeout).unwrap(); + assert_eq!(cmd, SendCommand::NotarizeCertificateAndFinalize(slot)); + { + let slot_state = get_state_for_slot_index(&shared_state, slot) + .lock() + .unwrap(); + assert!(slot_state.alpenglow_state.block_notarized); + assert!(!slot_state.alpenglow_state.block_finalized); + } + // and the rest of Notarize votes + for p in 6..=9 { + send_packet( + p, + VotorMessageType::Notarize, + slot, + &keypairs, + &peers, + &mut packet_tx_buf, + ); + } + // wait for the broadcast + let cmd = vote_command_receiver.recv_timeout(test_timeout).unwrap(); + assert_eq!(cmd, SendCommand::FinalizeCertificate(slot)); + { + let slot_state = get_state_for_slot_index(&shared_state, slot) + .lock() + .unwrap(); + assert!(slot_state.alpenglow_state.notarize_stake_collected >= 8); + assert!(slot_state.alpenglow_state.block_notarized); + assert!(slot_state.alpenglow_state.block_finalized); + } + + // epic packet loss we only see certs + let slot = slot + 1; + + debug!("Slot {slot} starting"); + let peers_map = make_peer_map(peers.as_slice()); + mock_prep_rx(&shared_state, slot, peers_map); + + // now we get a notarize certificate + send_packet( + 3, + VotorMessageType::NotarizeCertificateAndFinalize, + slot, + &keypairs, + &peers, + &mut packet_tx_buf, + ); + let cmd = vote_command_receiver.recv_timeout(test_timeout).unwrap(); + assert_eq!(cmd, SendCommand::NotarizeCertificateAndFinalize(slot)); + + { + let slot_state = get_state_for_slot_index(&shared_state, slot) + .lock() + .unwrap(); + assert_eq!(slot_state.alpenglow_state.notarize_stake_collected, 0); + assert_eq!(slot_state.alpenglow_state.finalize_stake_collected, 1); + assert!(slot_state.alpenglow_state.block_notarized); + assert!(!slot_state.alpenglow_state.block_finalized); + } + // and a Finalize cert + send_packet( + 6, + 
VotorMessageType::FinalizeCertificate, + slot, + &keypairs, + &peers, + &mut packet_tx_buf, + ); + let cmd = vote_command_receiver.recv_timeout(test_timeout).unwrap(); + assert_eq!(cmd, SendCommand::FinalizeCertificate(slot)); + { + let slot_state = get_state_for_slot_index(&shared_state, slot) + .lock() + .unwrap(); + assert_eq!(slot_state.alpenglow_state.notarize_stake_collected, 0); + assert_eq!(slot_state.alpenglow_state.finalize_stake_collected, 1); + assert!(slot_state.alpenglow_state.block_notarized); + assert!(slot_state.alpenglow_state.block_finalized); + } + assert!( + slot <= max_slots, + "max_slots should match actual test length to prevent CI from flaking" + ); + should_exit.store(true, std::sync::atomic::Ordering::Relaxed); + }); + } + + fn send_packet( + from_peer: usize, + votor_message: VotorMessageType, + slot: u64, + keypairs: &[Keypair], + peers: &[(Pubkey, UdpSocket)], + packet_buf: &mut [u8], + ) { + prep_and_sign_packet(packet_buf, slot, votor_message, &keypairs[from_peer]); + peers[from_peer] + .1 + .send_to( + &packet_buf[0..MOCK_VOTE_HEADER_SIZE], + peers[0].1.local_addr().unwrap(), + ) + .unwrap(); + } + + fn mock_prep_rx(state: &StateArray, slot: Slot, peer_map: HashMap) { + let mut state = get_state_for_slot_index(state, slot).lock().unwrap(); + state.reset(); + state.current_slot = slot; + state.current_slot_start = Instant::now(); + state.total_staked = peer_map.len() as u64; + state.peers = peer_map; + } + + fn make_peer_map(sockets: &[(Pubkey, UdpSocket)]) -> HashMap { + let mut result = HashMap::new(); + for (peer, socket) in sockets { + result.insert( + *peer, + PeerData { + stake: 1, + address: socket.local_addr().unwrap(), + relative_time_of_arrival: [None; NUM_VOTOR_TYPES], + }, + ); + } + result + } +} diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 340bc33db18db7..01e3a23d79d74b 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ 
b/core/src/repair/ancestor_hashes_service.rs @@ -20,7 +20,7 @@ use { crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, dashmap::{mapref::entry::Entry::Occupied, DashMap}, solana_clock::{Slot, DEFAULT_MS_PER_SLOT}, - solana_genesis_config::ClusterType, + solana_cluster_type::ClusterType, solana_gossip::{cluster_info::ClusterInfo, contact_info::Protocol, ping_pong::Pong}, solana_keypair::{signable::Signable, Keypair}, solana_ledger::blockstore::Blockstore, @@ -190,7 +190,6 @@ impl AncestorHashesService { ancestor_hashes_response_quic_receiver, PacketFlags::REPAIR, response_sender, - Recycler::default(), exit, ) }) @@ -606,7 +605,7 @@ impl AncestorHashesService { let serve_repair = { ServeRepair::new( repair_info.cluster_info.clone(), - repair_info.bank_forks.read().unwrap().sharable_root_bank(), + repair_info.bank_forks.read().unwrap().sharable_banks(), repair_info.repair_whitelist.clone(), Box::new(StandardRepairHandler::new(blockstore)), ) @@ -1175,7 +1174,7 @@ mod test { #[test] fn test_ancestor_hashes_service_find_epoch_slots_frozen_dead_slots() { let vote_simulator = VoteSimulator::new(3); - let cluster_slots = ClusterSlots::default(); + let cluster_slots = ClusterSlots::default_for_tests(); let mut dead_slot_pool = HashSet::new(); let mut repairable_dead_slot_pool = HashSet::new(); let root_bank = vote_simulator.bank_forks.read().unwrap().root_bank(); @@ -1272,11 +1271,7 @@ mod test { let responder_serve_repair = { ServeRepair::new( Arc::new(cluster_info), - vote_simulator - .bank_forks - .read() - .unwrap() - .sharable_root_bank(), + vote_simulator.bank_forks.read().unwrap().sharable_banks(), Arc::>>::default(), // repair whitelist Box::new(StandardRepairHandler::new(blockstore.clone())), ) @@ -1383,7 +1378,7 @@ mod test { let requester_serve_repair = { ServeRepair::new( requester_cluster_info.clone(), - bank_forks.read().unwrap().sharable_root_bank(), + bank_forks.read().unwrap().sharable_banks(), repair_whitelist.clone(), 
Box::new(StandardRepairHandler::new(blockstore)), ) @@ -1392,7 +1387,7 @@ mod test { let repair_info = RepairInfo { bank_forks, cluster_info: requester_cluster_info, - cluster_slots: Arc::new(ClusterSlots::default()), + cluster_slots: Arc::new(ClusterSlots::default_for_tests()), epoch_schedule, ancestor_duplicate_slots_sender, repair_validators: None, diff --git a/core/src/repair/cluster_slot_state_verifier.rs b/core/src/repair/cluster_slot_state_verifier.rs index 0cae312aeb110a..b8542bb4c70290 100644 --- a/core/src/repair/cluster_slot_state_verifier.rs +++ b/core/src/repair/cluster_slot_state_verifier.rs @@ -1584,7 +1584,7 @@ mod test { let (vote_simulator, blockstore) = setup_forks_from_tree(forks, 1, None); let descendants = vote_simulator.bank_forks.read().unwrap().descendants(); InitialState { - heaviest_subtree_fork_choice: vote_simulator.heaviest_subtree_fork_choice, + heaviest_subtree_fork_choice: vote_simulator.tbft_structs.heaviest_subtree_fork_choice, progress: vote_simulator.progress, descendants, bank_forks: vote_simulator.bank_forks, diff --git a/core/src/repair/repair_handler.rs b/core/src/repair/repair_handler.rs index de18d345515eb5..581e93b96da0ce 100644 --- a/core/src/repair/repair_handler.rs +++ b/core/src/repair/repair_handler.rs @@ -18,7 +18,7 @@ use { }, solana_perf::packet::{Packet, PacketBatch, PacketBatchRecycler, PinnedPacketBatch}, solana_pubkey::Pubkey, - solana_runtime::bank_forks::SharableBank, + solana_runtime::bank_forks::SharableBanks, std::{ collections::HashSet, net::SocketAddr, @@ -150,12 +150,12 @@ impl RepairHandlerType { &self, blockstore: Arc, cluster_info: Arc, - sharable_root_bank: SharableBank, + sharable_banks: SharableBanks, serve_repair_whitelist: Arc>>, ) -> ServeRepair { ServeRepair::new( cluster_info, - sharable_root_bank, + sharable_banks, serve_repair_whitelist, self.to_handler(blockstore), ) diff --git a/core/src/repair/repair_response.rs b/core/src/repair/repair_response.rs index e78864c1e9e66e..be4c8dd5d045b1 
100644 --- a/core/src/repair/repair_response.rs +++ b/core/src/repair/repair_response.rs @@ -49,7 +49,7 @@ mod test { solana_keypair::Keypair, solana_ledger::{ shred::Shredder, - sigverify_shreds::{verify_shred_cpu, LruCache}, + sigverify_shreds::{verify_shred_cpu, LruCache, SlotPubkeys}, }, solana_packet::PacketFlags, solana_signer::Signer, @@ -76,14 +76,14 @@ mod test { .unwrap(); packet.meta_mut().flags |= PacketFlags::REPAIR; - let leader_slots = HashMap::from([(slot, keypair.pubkey())]); + let leader_slots: SlotPubkeys = [(slot, keypair.pubkey())].into_iter().collect(); assert!(verify_shred_cpu((&packet).into(), &leader_slots, &cache)); let wrong_keypair = Keypair::new(); - let leader_slots = HashMap::from([(slot, wrong_keypair.pubkey())]); + let leader_slots: SlotPubkeys = [(slot, wrong_keypair.pubkey())].into_iter().collect(); assert!(!verify_shred_cpu((&packet).into(), &leader_slots, &cache)); - let leader_slots = HashMap::new(); + let leader_slots: SlotPubkeys = HashMap::default(); assert!(!verify_shred_cpu((&packet).into(), &leader_slots, &cache)); } diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index c500fa351437d5..72e6335ae82228 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -35,7 +35,7 @@ use { solana_pubkey::Pubkey, solana_runtime::{ bank::Bank, - bank_forks::{BankForks, SharableBank}, + bank_forks::{BankForks, SharableBanks}, }, solana_streamer::sendmmsg::{batch_send, SendPktsError}, solana_time_utils::timestamp, @@ -420,7 +420,7 @@ impl RepairServiceChannels { } struct RepairTracker { - root_bank: SharableBank, + sharable_banks: SharableBanks, repair_weight: RepairWeight, serve_repair: ServeRepair, repair_metrics: RepairMetrics, @@ -693,7 +693,7 @@ impl RepairService { popular_pruned_forks_sender, } = repair_channels; let RepairTracker { - root_bank, + sharable_banks, repair_weight, serve_repair, repair_metrics, @@ -701,7 +701,7 @@ impl RepairService { 
popular_pruned_forks_requests, outstanding_repairs, } = repair_tracker; - let root_bank = root_bank.load(); + let root_bank = sharable_banks.root(); Self::update_weighting_heuristic( blockstore, @@ -751,15 +751,15 @@ impl RepairService { repair_info: RepairInfo, outstanding_requests: &RwLock, ) { - let root_bank = repair_info.bank_forks.read().unwrap().sharable_root_bank(); - let root_bank_slot = root_bank.load().slot(); + let sharable_banks = repair_info.bank_forks.read().unwrap().sharable_banks(); + let root_bank_slot = sharable_banks.root().slot(); let mut repair_tracker = RepairTracker { - root_bank, + sharable_banks, repair_weight: RepairWeight::new(root_bank_slot), serve_repair: { ServeRepair::new( repair_info.cluster_info.clone(), - repair_info.bank_forks.read().unwrap().sharable_root_bank(), + repair_info.bank_forks.read().unwrap().sharable_banks(), repair_info.repair_whitelist.clone(), Box::new(StandardRepairHandler::new(blockstore.clone())), ) @@ -1641,13 +1641,13 @@ mod test { let bank_forks = BankForks::new_rw_arc(bank); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); - let cluster_slots = ClusterSlots::default(); + let cluster_slots = ClusterSlots::default_for_tests(); let cluster_info = Arc::new(new_test_cluster_info()); let identity_keypair = cluster_info.keypair().clone(); let serve_repair = { ServeRepair::new( cluster_info, - bank_forks.read().unwrap().sharable_root_bank(), + bank_forks.read().unwrap().sharable_banks(), Arc::new(RwLock::new(HashSet::default())), Box::new(StandardRepairHandler::new(blockstore.clone())), ) @@ -1749,7 +1749,7 @@ mod test { let serve_repair = { ServeRepair::new( cluster_info.clone(), - bank_forks.read().unwrap().sharable_root_bank(), + bank_forks.read().unwrap().sharable_banks(), Arc::new(RwLock::new(HashSet::default())), Box::new(StandardRepairHandler::new(blockstore)), ) @@ -1759,7 +1759,7 @@ mod test { // Signal that this peer has 
confirmed the dead slot, and is thus // a valid target for repair let dead_slot = 9; - let cluster_slots = ClusterSlots::default(); + let cluster_slots = ClusterSlots::default_for_tests(); cluster_slots.fake_epoch_info_for_tests(HashMap::from([(*valid_repair_peer.pubkey(), 42)])); cluster_slots.insert_node_id(dead_slot, *valid_repair_peer.pubkey()); cluster_info.insert_info(valid_repair_peer); diff --git a/core/src/repair/repair_weighted_traversal.rs b/core/src/repair/repair_weighted_traversal.rs index 97100d4e479ba1..6ffc02071a5b1b 100644 --- a/core/src/repair/repair_weighted_traversal.rs +++ b/core/src/repair/repair_weighted_traversal.rs @@ -293,7 +293,7 @@ pub mod test { &keypair, &[], true, - Some(Hash::default()), + Hash::default(), last_shred as u32, last_shred as u32, &reed_solomon_cache, diff --git a/core/src/repair/serve_repair.rs b/core/src/repair/serve_repair.rs index 06e71cb7e05c5e..9123f262b9b1a4 100644 --- a/core/src/repair/serve_repair.rs +++ b/core/src/repair/serve_repair.rs @@ -25,7 +25,7 @@ use { Rng, }, solana_clock::Slot, - solana_genesis_config::ClusterType, + solana_cluster_type::ClusterType, solana_gossip::{ cluster_info::{ClusterInfo, ClusterInfoError}, contact_info::{ContactInfo, Protocol}, @@ -41,7 +41,7 @@ use { packet::{Packet, PacketBatch, PacketBatchRecycler, PinnedPacketBatch}, }, solana_pubkey::{Pubkey, PUBKEY_BYTES}, - solana_runtime::bank_forks::SharableBank, + solana_runtime::bank_forks::SharableBanks, solana_signature::{Signature, SIGNATURE_BYTES}, solana_signer::Signer, solana_streamer::{ @@ -229,7 +229,7 @@ type PingCache = ping_pong::PingCache; #[cfg_attr( feature = "frozen-abi", derive(AbiEnumVisitor, AbiExample), - frozen_abi(digest = "9KN64WUT7XDYj9zZopS1hztGyAP9y4N4QznsyC4mqsGs") + frozen_abi(digest = "fFcqrZWZX4WcorTUxfMCVWeh2QcwamXKdLTzsDj58Kn") )] #[derive(Debug, Deserialize, Serialize)] pub enum RepairProtocol { @@ -338,7 +338,7 @@ impl RepairProtocol { pub struct ServeRepair { cluster_info: Arc, - root_bank: 
SharableBank, + sharable_banks: SharableBanks, repair_whitelist: Arc>>, repair_handler: Box, } @@ -401,13 +401,13 @@ struct RepairRequestWithMeta { impl ServeRepair { pub fn new( cluster_info: Arc, - sharable_root_bank: SharableBank, + sharable_banks: SharableBanks, repair_whitelist: Arc>>, repair_handler: Box, ) -> Self { Self { cluster_info, - root_bank: sharable_root_bank, + sharable_banks, repair_whitelist, repair_handler, } @@ -424,7 +424,7 @@ impl ServeRepair { let repair_handler = Box::new(StandardRepairHandler::new(blockstore)); Self::new( cluster_info, - bank_forks.read().unwrap().sharable_root_bank(), + bank_forks.read().unwrap().sharable_banks(), repair_whitelist, repair_handler, ) @@ -662,7 +662,7 @@ impl ServeRepair { let mut total_requests = requests.len(); let socket_addr_space = *self.cluster_info.socket_addr_space(); - let root_bank = self.root_bank.load(); + let root_bank = self.sharable_banks.root(); let epoch_staked_nodes = root_bank.epoch_staked_nodes(root_bank.epoch()); let identity_keypair = self.cluster_info.keypair().clone(); let my_id = identity_keypair.pubkey(); @@ -1838,7 +1838,7 @@ mod tests { &keypair, &[], true, - Some(Hash::default()), + Hash::default(), index as u32, index as u32, &reed_solomon_cache, @@ -1881,7 +1881,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank); - let cluster_slots = ClusterSlots::default(); + let cluster_slots = ClusterSlots::default_for_tests(); let cluster_info = Arc::new(new_test_cluster_info()); let serve_repair = ServeRepair::new_for_test( cluster_info.clone(), @@ -2182,7 +2182,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = BankForks::new_rw_arc(bank); - let cluster_slots = ClusterSlots::default(); + let cluster_slots = ClusterSlots::default_for_tests(); let cluster_info = Arc::new(new_test_cluster_info()); let me = cluster_info.my_contact_info(); let (repair_request_quic_sender, _) = tokio::sync::mpsc::channel(/*buffer:*/ 128); @@ -2277,7 +2277,7 @@ mod tests { &keypair, &[], true, - Some(Hash::default()), + Hash::default(), 0, 0, &reed_solomon_cache, diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index e5aa98f4d0ce21..88bfcc5caa2ade 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -32,6 +32,7 @@ use { voting_service::VoteOp, window_service::DuplicateSlotReceiver, }, + agave_votor::root_utils, crossbeam_channel::{Receiver, RecvTimeoutError, Sender}, rayon::{prelude::*, ThreadPool}, solana_accounts_db::contains::Contains, @@ -53,7 +54,10 @@ use { leader_schedule_utils::first_of_consecutive_leader_slots, }, solana_measure::measure::Measure, - solana_poh::poh_recorder::{PohLeaderStatus, PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, + solana_poh::{ + poh_controller::PohController, + poh_recorder::{PohLeaderStatus, PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, + }, solana_pubkey::Pubkey, solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSenderConfig}, @@ -71,8 +75,8 @@ use { vote_sender_types::ReplayVoteSender, }, solana_signer::Signer, + solana_svm_timings::ExecuteTimings, solana_time_utils::timestamp, - solana_timings::ExecuteTimings, solana_transaction::Transaction, solana_vote::vote_transaction::VoteTransaction, std::{ @@ -191,6 +195,14 @@ struct SkippedSlotsInfo { last_skipped_slot: u64, } +pub struct TowerBFTStructures { + pub heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice, + pub duplicate_slots_tracker: DuplicateSlotsTracker, + pub duplicate_confirmed_slots: DuplicateConfirmedSlots, + 
pub unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes, + pub epoch_slots_frozen_slots: EpochSlotsFrozenSlots, +} + struct PartitionInfo { partition_start_time: Option, } @@ -270,6 +282,7 @@ pub struct ReplayStageConfig { pub bank_forks: Arc>, pub cluster_info: Arc, pub poh_recorder: Arc>, + pub poh_controller: PohController, pub tower: Tower, pub vote_tracker: Arc, pub cluster_slots: Arc, @@ -341,19 +354,15 @@ struct ReplayLoopTiming { } impl ReplayLoopTiming { #[allow(clippy::too_many_arguments)] - fn update( + fn update_non_alpenglow( &mut self, collect_frozen_banks_elapsed_us: u64, compute_bank_stats_elapsed_us: u64, select_vote_and_reset_forks_elapsed_us: u64, - start_leader_elapsed_us: u64, reset_bank_elapsed_us: u64, voting_elapsed_us: u64, select_forks_elapsed_us: u64, compute_slot_stats_elapsed_us: u64, - generate_new_bank_forks_elapsed_us: u64, - replay_active_banks_elapsed_us: u64, - wait_receive_elapsed_us: u64, heaviest_fork_failures_elapsed_us: u64, bank_count: u64, process_ancestor_hashes_duplicate_slots_elapsed_us: u64, @@ -363,19 +372,15 @@ impl ReplayLoopTiming { process_duplicate_slots_elapsed_us: u64, repair_correct_slots_elapsed_us: u64, retransmit_not_propagated_elapsed_us: u64, + start_leader_elapsed_us: u64, ) { - self.loop_count += 1; self.collect_frozen_banks_elapsed_us += collect_frozen_banks_elapsed_us; self.compute_bank_stats_elapsed_us += compute_bank_stats_elapsed_us; self.select_vote_and_reset_forks_elapsed_us += select_vote_and_reset_forks_elapsed_us; - self.start_leader_elapsed_us += start_leader_elapsed_us; self.reset_bank_elapsed_us += reset_bank_elapsed_us; self.voting_elapsed_us += voting_elapsed_us; self.select_forks_elapsed_us += select_forks_elapsed_us; self.compute_slot_stats_elapsed_us += compute_slot_stats_elapsed_us; - self.generate_new_bank_forks_elapsed_us += generate_new_bank_forks_elapsed_us; - self.replay_active_banks_elapsed_us += replay_active_banks_elapsed_us; - self.wait_receive_elapsed_us += 
wait_receive_elapsed_us; self.heaviest_fork_failures_elapsed_us += heaviest_fork_failures_elapsed_us; self.bank_count += bank_count; self.process_ancestor_hashes_duplicate_slots_elapsed_us += @@ -388,6 +393,19 @@ impl ReplayLoopTiming { self.process_duplicate_slots_elapsed_us += process_duplicate_slots_elapsed_us; self.repair_correct_slots_elapsed_us += repair_correct_slots_elapsed_us; self.retransmit_not_propagated_elapsed_us += retransmit_not_propagated_elapsed_us; + self.start_leader_elapsed_us += start_leader_elapsed_us; + } + + fn update_common( + &mut self, + generate_new_bank_forks_elapsed_us: u64, + replay_active_banks_elapsed_us: u64, + wait_receive_elapsed_us: u64, + ) { + self.loop_count += 1; + self.generate_new_bank_forks_elapsed_us += generate_new_bank_forks_elapsed_us; + self.replay_active_banks_elapsed_us += replay_active_banks_elapsed_us; + self.wait_receive_elapsed_us += wait_receive_elapsed_us; self.maybe_submit(); } @@ -570,6 +588,7 @@ impl ReplayStage { bank_forks, cluster_info, poh_recorder, + mut poh_controller, mut tower, vote_tracker, cluster_slots, @@ -605,6 +624,23 @@ impl ReplayStage { popular_pruned_forks_receiver, } = receivers; + // Alpenglow migration status + let mut is_alpenglow_migration_complete = false; + let mut first_alpenglow_slot = bank_forks + .read() + .unwrap() + .root_bank() + .feature_set + .activated_slot(&agave_feature_set::alpenglow::id()); + if let Some(first_alpenglow_slot) = first_alpenglow_slot { + assert!(bank_forks.read().unwrap().highest_slot() >= first_alpenglow_slot); + info!("alpenglow active on startup"); + Self::maybe_initiate_alpenglow_migration( + &poh_recorder, + &mut is_alpenglow_migration_complete, + ); + } + trace!("replay stage"); // Start the replay stage loop let (lockouts_sender, commitment_service) = AggregateCommitmentService::new( @@ -615,9 +651,10 @@ impl ReplayStage { let run_replay = move || { let verify_recyclers = VerifyRecyclers::default(); let _exit = Finalizer::new(exit.clone()); + 
let mut identity_keypair = cluster_info.keypair().clone(); let mut my_pubkey = identity_keypair.pubkey(); - if my_pubkey != tower.node_pubkey { + if !is_alpenglow_migration_complete && my_pubkey != tower.node_pubkey { // set-identity was called during the startup procedure, ensure the tower is consistent // before starting the loop. further calls to set-identity will reload the tower in the loop let my_old_pubkey = tower.node_pubkey; @@ -639,7 +676,7 @@ impl ReplayStage { }; warn!("Identity changed during startup from {my_old_pubkey} to {my_pubkey}"); } - let (mut progress, mut heaviest_subtree_fork_choice) = + let (mut progress, heaviest_subtree_fork_choice) = Self::initialize_progress_and_fork_choice_with_locked_bank_forks( &bank_forks, &my_pubkey, @@ -652,16 +689,13 @@ impl ReplayStage { let mut partition_info = PartitionInfo::new(); let mut skipped_slots_info = SkippedSlotsInfo::default(); let mut replay_timing = ReplayLoopTiming::default(); - let mut duplicate_slots_tracker = DuplicateSlotsTracker::default(); - let mut duplicate_confirmed_slots: DuplicateConfirmedSlots = - DuplicateConfirmedSlots::default(); - let mut epoch_slots_frozen_slots: EpochSlotsFrozenSlots = - EpochSlotsFrozenSlots::default(); + let duplicate_slots_tracker = DuplicateSlotsTracker::default(); + let duplicate_confirmed_slots = DuplicateConfirmedSlots::default(); + let epoch_slots_frozen_slots = EpochSlotsFrozenSlots::default(); let mut duplicate_slots_to_repair = DuplicateSlotsToRepair::default(); let mut purge_repair_slot_counter = PurgeRepairSlotCounter::default(); - let mut unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes = - UnfrozenGossipVerifiedVoteHashes::default(); - let mut latest_validator_votes_for_frozen_banks: LatestValidatorVotesForFrozenBanks = + let unfrozen_gossip_verified_vote_hashes = UnfrozenGossipVerifiedVoteHashes::default(); + let mut latest_validator_votes_for_frozen_banks = LatestValidatorVotesForFrozenBanks::default(); let mut 
tracked_vote_transactions: Vec = Vec::new(); let mut has_new_vote_been_rooted = !wait_for_vote_to_start_leader; @@ -669,6 +703,13 @@ impl ReplayStage { last_refresh_time: Instant::now(), last_print_time: Instant::now(), }; + let mut tbft_structs = TowerBFTStructures { + heaviest_subtree_fork_choice, + duplicate_slots_tracker, + duplicate_confirmed_slots, + unfrozen_gossip_verified_vote_hashes, + epoch_slots_frozen_slots, + }; let (working_bank, in_vote_only_mode) = { let r_bank_forks = bank_forks.read().unwrap(); ( @@ -695,13 +736,19 @@ impl ReplayStage { .build() .expect("new rayon threadpool"); - Self::reset_poh_recorder( - &my_pubkey, - &blockstore, - working_bank, - &poh_recorder, - &leader_schedule_cache, - ); + let shared_poh_bank = poh_recorder.read().unwrap().shared_working_bank(); + if !is_alpenglow_migration_complete { + // This reset is handled by Votor instead when alpenglow is active + Self::reset_poh_recorder( + &my_pubkey, + &blockstore, + working_bank, + &mut poh_controller, + &leader_schedule_cache, + ); + // initially we wait for poh service to pick up the bank. + while poh_controller.has_pending_message() && !exit.load(Ordering::Relaxed) {} + } loop { // Stop getting entries if we get exit signal @@ -722,7 +769,10 @@ impl ReplayStage { ); generate_new_bank_forks_time.stop(); - let mut tpu_has_bank = poh_recorder.read().unwrap().has_bank(); + // We either have a bank currently, OR there is a pending message to either reset or set + // the bank. 
+ let tpu_has_bank = + shared_poh_bank.load().is_some() || poh_controller.has_pending_message(); let mut replay_active_banks_time = Measure::start("replay_active_banks_time"); let (mut ancestors, mut descendants) = { @@ -738,15 +788,10 @@ impl ReplayStage { transaction_status_sender.as_ref(), entry_notification_sender.as_ref(), &verify_recyclers, - &mut heaviest_subtree_fork_choice, &replay_vote_sender, &bank_notification_sender, rpc_subscriptions.as_deref(), &slot_status_notifier, - &mut duplicate_slots_tracker, - &duplicate_confirmed_slots, - &mut epoch_slots_frozen_slots, - &mut unfrozen_gossip_verified_vote_hashes, &mut latest_validator_votes_for_frozen_banks, &cluster_slots_update_sender, &cost_update_sender, @@ -759,427 +804,454 @@ impl ReplayStage { &replay_tx_thread_pool, &prioritization_fee_cache, &mut purge_repair_slot_counter, + &poh_recorder, + first_alpenglow_slot, + (!is_alpenglow_migration_complete).then_some(&mut tbft_structs), + &mut is_alpenglow_migration_complete, ); replay_active_banks_time.stop(); let forks_root = bank_forks.read().unwrap().root(); - // Process cluster-agreed versions of duplicate slots for which we potentially - // have the wrong version. Our version was dead or pruned. - // Signalled by ancestor_hashes_service. 
- let mut process_ancestor_hashes_duplicate_slots_time = - Measure::start("process_ancestor_hashes_duplicate_slots"); - Self::process_ancestor_hashes_duplicate_slots( - &my_pubkey, - &blockstore, - &ancestor_duplicate_slots_receiver, - &mut duplicate_slots_tracker, - &duplicate_confirmed_slots, - &mut epoch_slots_frozen_slots, - &progress, - &mut heaviest_subtree_fork_choice, - &bank_forks, - &mut duplicate_slots_to_repair, - &ancestor_hashes_replay_update_sender, - &mut purge_repair_slot_counter, - ); - process_ancestor_hashes_duplicate_slots_time.stop(); - - // Check for any newly duplicate confirmed slots detected from gossip / replay - // Note: since this is tracked using both gossip & replay votes, stake is not - // rolled up from descendants. - let mut process_duplicate_confirmed_slots_time = - Measure::start("process_duplicate_confirmed_slots"); - Self::process_duplicate_confirmed_slots( - &duplicate_confirmed_slots_receiver, - &blockstore, - &mut duplicate_slots_tracker, - &mut duplicate_confirmed_slots, - &mut epoch_slots_frozen_slots, - &bank_forks, - &progress, - &mut heaviest_subtree_fork_choice, - &mut duplicate_slots_to_repair, - &ancestor_hashes_replay_update_sender, - &mut purge_repair_slot_counter, - ); - process_duplicate_confirmed_slots_time.stop(); - - // Ingest any new verified votes from gossip. Important for fork choice - // and switching proofs because these may be votes that haven't yet been - // included in a block, so we may not have yet observed these votes just - // by replaying blocks. 
- let mut process_unfrozen_gossip_verified_vote_hashes_time = - Measure::start("process_gossip_verified_vote_hashes"); - Self::process_gossip_verified_vote_hashes( - &gossip_verified_vote_hash_receiver, - &mut unfrozen_gossip_verified_vote_hashes, - &heaviest_subtree_fork_choice, - &mut latest_validator_votes_for_frozen_banks, - ); - for _ in gossip_verified_vote_hash_receiver.try_iter() {} - process_unfrozen_gossip_verified_vote_hashes_time.stop(); - - let mut process_popular_pruned_forks_time = - Measure::start("process_popular_pruned_forks_time"); - // Check for "popular" (52+% stake aggregated across versions/descendants) forks - // that are pruned, which would not be detected by normal means. - // Signalled by `repair_service`. - Self::process_popular_pruned_forks( - &popular_pruned_forks_receiver, - &blockstore, - &mut duplicate_slots_tracker, - &mut epoch_slots_frozen_slots, - &bank_forks, - &mut heaviest_subtree_fork_choice, - &mut duplicate_slots_to_repair, - &ancestor_hashes_replay_update_sender, - &mut purge_repair_slot_counter, - ); - process_popular_pruned_forks_time.stop(); - - // Check to remove any duplicated slots from fork choice - let mut process_duplicate_slots_time = Measure::start("process_duplicate_slots"); - if !tpu_has_bank { - Self::process_duplicate_slots( + if !is_alpenglow_migration_complete { + // Process cluster-agreed versions of duplicate slots for which we potentially + // have the wrong version. Our version was dead or pruned. + // Signalled by ancestor_hashes_service. 
+ let mut process_ancestor_hashes_duplicate_slots_time = + Measure::start("process_ancestor_hashes_duplicate_slots"); + Self::process_ancestor_hashes_duplicate_slots( + &my_pubkey, &blockstore, - &duplicate_slots_receiver, - &mut duplicate_slots_tracker, - &duplicate_confirmed_slots, - &mut epoch_slots_frozen_slots, - &bank_forks, + &ancestor_duplicate_slots_receiver, + &mut tbft_structs.duplicate_slots_tracker, + &tbft_structs.duplicate_confirmed_slots, + &mut tbft_structs.epoch_slots_frozen_slots, &progress, - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, + &bank_forks, &mut duplicate_slots_to_repair, &ancestor_hashes_replay_update_sender, &mut purge_repair_slot_counter, ); - } - process_duplicate_slots_time.stop(); - - let mut collect_frozen_banks_time = Measure::start("frozen_banks"); - let mut frozen_banks: Vec<_> = bank_forks - .read() - .unwrap() - .frozen_banks() - .filter(|(slot, _bank)| *slot >= forks_root) - .map(|(_slot, bank)| bank) - .collect(); - collect_frozen_banks_time.stop(); - - let mut compute_bank_stats_time = Measure::start("compute_bank_stats"); - let newly_computed_slot_stats = Self::compute_bank_stats( - &vote_account, - &ancestors, - &mut frozen_banks, - &mut tower, - &mut progress, - &vote_tracker, - &cluster_slots, - &bank_forks, - &mut heaviest_subtree_fork_choice, - &mut latest_validator_votes_for_frozen_banks, - ); - compute_bank_stats_time.stop(); - - let mut compute_slot_stats_time = Measure::start("compute_slot_stats_time"); - for slot in newly_computed_slot_stats { - let fork_stats = progress.get_fork_stats(slot).unwrap(); - let duplicate_confirmed_forks = Self::tower_duplicate_confirmed_forks( - &tower, - &fork_stats.voted_stakes, - fork_stats.total_stake, - &progress, - &bank_forks, - ); - - Self::mark_slots_duplicate_confirmed( - &duplicate_confirmed_forks, + process_ancestor_hashes_duplicate_slots_time.stop(); + + // Check for any newly duplicate confirmed slots detected from gossip / 
replay + // Note: since this is tracked using both gossip & replay votes, stake is not + // rolled up from descendants. + let mut process_duplicate_confirmed_slots_time = + Measure::start("process_duplicate_confirmed_slots"); + Self::process_duplicate_confirmed_slots( + &duplicate_confirmed_slots_receiver, &blockstore, + &mut tbft_structs.duplicate_slots_tracker, + &mut tbft_structs.duplicate_confirmed_slots, + &mut tbft_structs.epoch_slots_frozen_slots, &bank_forks, - &mut progress, - &mut duplicate_slots_tracker, - &mut heaviest_subtree_fork_choice, - &mut epoch_slots_frozen_slots, + &progress, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut duplicate_slots_to_repair, &ancestor_hashes_replay_update_sender, &mut purge_repair_slot_counter, - &mut duplicate_confirmed_slots, ); - } - compute_slot_stats_time.stop(); - - let mut select_forks_time = Measure::start("select_forks_time"); - let (heaviest_bank, heaviest_bank_on_same_voted_fork) = - heaviest_subtree_fork_choice.select_forks( - &frozen_banks, - &tower, - &progress, - &ancestors, + process_duplicate_confirmed_slots_time.stop(); + + // Ingest any new verified votes from gossip. Important for fork choice + // and switching proofs because these may be votes that haven't yet been + // included in a block, so we may not have yet observed these votes just + // by replaying blocks. 
+ let mut process_unfrozen_gossip_verified_vote_hashes_time = + Measure::start("process_gossip_verified_vote_hashes"); + Self::process_gossip_verified_vote_hashes( + &gossip_verified_vote_hash_receiver, + &mut tbft_structs.unfrozen_gossip_verified_vote_hashes, + &tbft_structs.heaviest_subtree_fork_choice, + &mut latest_validator_votes_for_frozen_banks, + ); + for _ in gossip_verified_vote_hash_receiver.try_iter() {} + process_unfrozen_gossip_verified_vote_hashes_time.stop(); + + let mut process_popular_pruned_forks_time = + Measure::start("process_popular_pruned_forks_time"); + // Check for "popular" (52+% stake aggregated across versions/descendants) forks + // that are pruned, which would not be detected by normal means. + // Signalled by `repair_service`. + Self::process_popular_pruned_forks( + &popular_pruned_forks_receiver, + &blockstore, + &mut tbft_structs.duplicate_slots_tracker, + &mut tbft_structs.epoch_slots_frozen_slots, &bank_forks, + &mut tbft_structs.heaviest_subtree_fork_choice, + &mut duplicate_slots_to_repair, + &ancestor_hashes_replay_update_sender, + &mut purge_repair_slot_counter, ); - select_forks_time.stop(); + process_popular_pruned_forks_time.stop(); - Self::check_for_vote_only_mode( - heaviest_bank.slot(), - forks_root, - &in_vote_only_mode, - &bank_forks, - ); - - let mut select_vote_and_reset_forks_time = - Measure::start("select_vote_and_reset_forks"); - let SelectVoteAndResetForkResult { - vote_bank, - reset_bank, - heaviest_fork_failures, - } = select_vote_and_reset_forks( - &heaviest_bank, - heaviest_bank_on_same_voted_fork.as_ref(), - &ancestors, - &descendants, - &progress, - &mut tower, - &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, - ); - select_vote_and_reset_forks_time.stop(); + // Check to remove any duplicated slots from fork choice + let mut process_duplicate_slots_time = + Measure::start("process_duplicate_slots"); + if !tpu_has_bank { + Self::process_duplicate_slots( + &blockstore, + 
&duplicate_slots_receiver, + &mut tbft_structs.duplicate_slots_tracker, + &tbft_structs.duplicate_confirmed_slots, + &mut tbft_structs.epoch_slots_frozen_slots, + &bank_forks, + &progress, + &mut tbft_structs.heaviest_subtree_fork_choice, + &mut duplicate_slots_to_repair, + &ancestor_hashes_replay_update_sender, + &mut purge_repair_slot_counter, + ); + } + process_duplicate_slots_time.stop(); - if vote_bank.is_none() { - Self::maybe_refresh_last_vote( - &mut tower, - &progress, - heaviest_bank_on_same_voted_fork, + let mut collect_frozen_banks_time = Measure::start("frozen_banks"); + let mut frozen_banks: Vec<_> = bank_forks + .read() + .unwrap() + .frozen_banks() + .filter(|(slot, _bank)| *slot >= forks_root) + .map(|(_slot, bank)| bank) + .collect(); + collect_frozen_banks_time.stop(); + + let mut compute_bank_stats_time = Measure::start("compute_bank_stats"); + let newly_computed_slot_stats = Self::compute_bank_stats( &vote_account, - &identity_keypair, - &authorized_voter_keypairs.read().unwrap(), - &mut tracked_vote_transactions, - has_new_vote_been_rooted, - &mut last_vote_refresh_time, - &voting_sender, - wait_to_vote_slot, - ); - } - - let mut heaviest_fork_failures_time = Measure::start("heaviest_fork_failures_time"); - if tower.is_recent(heaviest_bank.slot()) && !heaviest_fork_failures.is_empty() { - Self::log_heaviest_fork_failures( - &heaviest_fork_failures, - &bank_forks, - &tower, - &progress, &ancestors, - &heaviest_bank, - &mut last_threshold_failure_slot, + &mut frozen_banks, + &mut tower, + &mut progress, + &vote_tracker, + &cluster_slots, + &bank_forks, + &mut tbft_structs.heaviest_subtree_fork_choice, + &mut latest_validator_votes_for_frozen_banks, ); - } - heaviest_fork_failures_time.stop(); + compute_bank_stats_time.stop(); + + let mut compute_slot_stats_time = Measure::start("compute_slot_stats_time"); + for slot in newly_computed_slot_stats { + let fork_stats = progress.get_fork_stats(slot).unwrap(); + let duplicate_confirmed_forks = 
Self::tower_duplicate_confirmed_forks( + &tower, + &fork_stats.voted_stakes, + fork_stats.total_stake, + &progress, + &bank_forks, + ); - let mut voting_time = Measure::start("voting_time"); - // Vote on a fork - if let Some((ref vote_bank, ref switch_fork_decision)) = vote_bank { - if let Some(votable_leader) = - leader_schedule_cache.slot_leader_at(vote_bank.slot(), Some(vote_bank)) - { - Self::log_leader_change( - &my_pubkey, - vote_bank.slot(), - &mut current_leader, - &votable_leader, + Self::mark_slots_duplicate_confirmed( + &duplicate_confirmed_forks, + &blockstore, + &bank_forks, + &mut progress, + &mut tbft_structs.duplicate_slots_tracker, + &mut tbft_structs.heaviest_subtree_fork_choice, + &mut tbft_structs.epoch_slots_frozen_slots, + &mut duplicate_slots_to_repair, + &ancestor_hashes_replay_update_sender, + &mut purge_repair_slot_counter, + &mut tbft_structs.duplicate_confirmed_slots, ); } + compute_slot_stats_time.stop(); + + let mut select_forks_time = Measure::start("select_forks_time"); + let (heaviest_bank, heaviest_bank_on_same_voted_fork) = tbft_structs + .heaviest_subtree_fork_choice + .select_forks(&frozen_banks, &tower, &progress, &ancestors, &bank_forks); + select_forks_time.stop(); + + Self::check_for_vote_only_mode( + heaviest_bank.slot(), + forks_root, + &in_vote_only_mode, + &bank_forks, + ); - if let Err(e) = Self::handle_votable_bank( + let mut select_vote_and_reset_forks_time = + Measure::start("select_vote_and_reset_forks"); + let SelectVoteAndResetForkResult { vote_bank, - switch_fork_decision, - &bank_forks, + reset_bank, + heaviest_fork_failures, + } = select_vote_and_reset_forks( + &heaviest_bank, + heaviest_bank_on_same_voted_fork.as_ref(), + &ancestors, + &descendants, + &progress, &mut tower, - &mut progress, - &vote_account, - &identity_keypair, - &authorized_voter_keypairs.read().unwrap(), - &blockstore, - &leader_schedule_cache, - &lockouts_sender, - snapshot_controller.as_deref(), - rpc_subscriptions.as_deref(), - 
&block_commitment_cache, - &mut heaviest_subtree_fork_choice, - &bank_notification_sender, - &mut duplicate_slots_tracker, - &mut duplicate_confirmed_slots, - &mut unfrozen_gossip_verified_vote_hashes, - &mut tracked_vote_transactions, - &mut has_new_vote_been_rooted, - &mut replay_timing, - &voting_sender, - &mut epoch_slots_frozen_slots, - &drop_bank_sender, - wait_to_vote_slot, - ) { - error!("Unable to set root: {e}"); - return; - } - } - voting_time.stop(); - - let mut reset_bank_time = Measure::start("reset_bank"); - // Reset onto a fork - if let Some(reset_bank) = reset_bank { - if last_reset == reset_bank.last_blockhash() { - let reset_bank_descendants = - Self::get_active_descendants(reset_bank.slot(), &progress, &blockstore); - if reset_bank_descendants != last_reset_bank_descendants { - last_reset_bank_descendants = reset_bank_descendants; - poh_recorder - .write() - .unwrap() - .update_start_bank_active_descendants(&last_reset_bank_descendants); - } - } else { - info!( - "vote bank: {:?} reset bank: {:?}", - vote_bank - .as_ref() - .map(|(b, switch_fork_decision)| (b.slot(), switch_fork_decision)), - reset_bank.slot(), - ); - let fork_progress = progress - .get(&reset_bank.slot()) - .expect("bank to reset to must exist in progress map"); - datapoint_info!( - "blocks_produced", - ("num_blocks_on_fork", fork_progress.num_blocks_on_fork, i64), - ( - "num_dropped_blocks_on_fork", - fork_progress.num_dropped_blocks_on_fork, - i64 - ), + &latest_validator_votes_for_frozen_banks, + &tbft_structs.heaviest_subtree_fork_choice, + ); + select_vote_and_reset_forks_time.stop(); + + if vote_bank.is_none() { + Self::maybe_refresh_last_vote( + &mut tower, + &progress, + heaviest_bank_on_same_voted_fork, + &vote_account, + &identity_keypair, + &authorized_voter_keypairs.read().unwrap(), + &mut tracked_vote_transactions, + has_new_vote_been_rooted, + &mut last_vote_refresh_time, + &voting_sender, + wait_to_vote_slot, ); + } - if my_pubkey != cluster_info.id() { - 
identity_keypair = cluster_info.keypair().clone(); - let my_old_pubkey = my_pubkey; - my_pubkey = identity_keypair.pubkey(); + let mut heaviest_fork_failures_time = + Measure::start("heaviest_fork_failures_time"); + if tower.is_recent(heaviest_bank.slot()) && !heaviest_fork_failures.is_empty() { + Self::log_heaviest_fork_failures( + &heaviest_fork_failures, + &bank_forks, + &tower, + &progress, + &ancestors, + &heaviest_bank, + &mut last_threshold_failure_slot, + ); + } + heaviest_fork_failures_time.stop(); - // Load the new identity's tower - tower = match Self::load_tower( - tower_storage.as_ref(), + let mut voting_time = Measure::start("voting_time"); + // Vote on a fork + if let Some((ref vote_bank, ref switch_fork_decision)) = vote_bank { + if let Some(votable_leader) = + leader_schedule_cache.slot_leader_at(vote_bank.slot(), Some(vote_bank)) + { + Self::log_leader_change( &my_pubkey, - &vote_account, - &bank_forks, - ) { - Ok(tower) => tower, - Err(err) => { - error!( - "Unable to load new tower when attempting to change \ - identity from {my_old_pubkey} to {my_pubkey} on \ - set-identity, Exiting: {err}" - ); - // drop(_exit) will set the exit flag, eventually tearing down the entire process - return; - } - }; - // Ensure the validator can land votes with the new identity before - // becoming leader - has_new_vote_been_rooted = !wait_for_vote_to_start_leader; - warn!("Identity changed from {my_old_pubkey} to {my_pubkey}"); + vote_bank.slot(), + &mut current_leader, + &votable_leader, + ); } - Self::reset_poh_recorder( - &my_pubkey, + if let Err(e) = Self::handle_votable_bank( + vote_bank, + switch_fork_decision, + &bank_forks, + &mut tower, + &mut progress, + &vote_account, + &identity_keypair, + &authorized_voter_keypairs.read().unwrap(), &blockstore, - reset_bank.clone(), - &poh_recorder, &leader_schedule_cache, - ); - last_reset = reset_bank.last_blockhash(); - last_reset_bank_descendants = vec![]; - tpu_has_bank = false; - - if let Some(last_voted_slot) 
= tower.last_voted_slot() { - // If the current heaviest bank is not a descendant of the last voted slot, - // there must be a partition - partition_info.update( - Self::is_partition_detected( - &ancestors, - last_voted_slot, - heaviest_bank.slot(), - ), - heaviest_bank.slot(), - last_voted_slot, + &lockouts_sender, + snapshot_controller.as_deref(), + rpc_subscriptions.as_deref(), + &block_commitment_cache, + &bank_notification_sender, + &mut tracked_vote_transactions, + &mut has_new_vote_been_rooted, + &mut replay_timing, + &voting_sender, + &drop_bank_sender, + wait_to_vote_slot, + &mut first_alpenglow_slot, + &mut tbft_structs, + ) { + error!("Unable to set root: {e}"); + return; + } + } + voting_time.stop(); + + let mut reset_bank_time = Measure::start("reset_bank"); + // Reset onto a fork + if let Some(reset_bank) = reset_bank { + if last_reset == reset_bank.last_blockhash() { + let reset_bank_descendants = Self::get_active_descendants( + reset_bank.slot(), + &progress, + &blockstore, + ); + if reset_bank_descendants != last_reset_bank_descendants { + last_reset_bank_descendants = reset_bank_descendants; + poh_recorder + .write() + .unwrap() + .update_start_bank_active_descendants( + &last_reset_bank_descendants, + ); + } + } else { + info!( + "vote bank: {:?} reset bank: {:?}", + vote_bank.as_ref().map(|(b, switch_fork_decision)| ( + b.slot(), + switch_fork_decision + )), reset_bank.slot(), - heaviest_fork_failures, ); + let fork_progress = progress + .get(&reset_bank.slot()) + .expect("bank to reset to must exist in progress map"); + datapoint_info!( + "blocks_produced", + ("num_blocks_on_fork", fork_progress.num_blocks_on_fork, i64), + ( + "num_dropped_blocks_on_fork", + fork_progress.num_dropped_blocks_on_fork, + i64 + ), + ); + + if my_pubkey != cluster_info.id() { + identity_keypair = cluster_info.keypair().clone(); + let my_old_pubkey = my_pubkey; + my_pubkey = identity_keypair.pubkey(); + + // Load the new identity's tower + tower = match 
Self::load_tower( + tower_storage.as_ref(), + &my_pubkey, + &vote_account, + &bank_forks, + ) { + Ok(tower) => tower, + Err(err) => { + error!( + "Unable to load new tower when attempting to change \ + identity from {my_old_pubkey} to {my_pubkey} on \ + set-identity, Exiting: {err}" + ); + // drop(_exit) will set the exit flag, eventually tearing down the entire process + return; + } + }; + // Ensure the validator can land votes with the new identity before + // becoming leader + has_new_vote_been_rooted = !wait_for_vote_to_start_leader; + warn!("Identity changed from {my_old_pubkey} to {my_pubkey}"); + } + + if !poh_controller.has_pending_message() { + Self::reset_poh_recorder( + &my_pubkey, + &blockstore, + reset_bank.clone(), + &mut poh_controller, + &leader_schedule_cache, + ); + last_reset = reset_bank.last_blockhash(); + last_reset_bank_descendants = vec![]; + } + + if let Some(last_voted_slot) = tower.last_voted_slot() { + // If the current heaviest bank is not a descendant of the last voted slot, + // there must be a partition + partition_info.update( + Self::is_partition_detected( + &ancestors, + last_voted_slot, + heaviest_bank.slot(), + ), + heaviest_bank.slot(), + last_voted_slot, + reset_bank.slot(), + heaviest_fork_failures, + ); + } } } - } - reset_bank_time.stop(); - - let mut start_leader_time = Measure::start("start_leader_time"); - let mut dump_then_repair_correct_slots_time = - Measure::start("dump_then_repair_correct_slots_time"); - // Used for correctness check - let poh_bank = poh_recorder.read().unwrap().bank(); - // Dump any duplicate slots that have been confirmed by the network in - // anticipation of repairing the confirmed version of the slot. - // - // Has to be before `maybe_start_leader()`. Otherwise, `ancestors` and `descendants` - // will be outdated, and we cannot assume `poh_bank` will be in either of these maps. 
- Self::dump_then_repair_correct_slots( - &mut duplicate_slots_to_repair, - &mut ancestors, - &mut descendants, - &mut progress, - &bank_forks, - &blockstore, - poh_bank.map(|bank| bank.slot()), - &mut purge_repair_slot_counter, - &dumped_slots_sender, - &my_pubkey, - &leader_schedule_cache, - ); - dump_then_repair_correct_slots_time.stop(); - - let mut retransmit_not_propagated_time = - Measure::start("retransmit_not_propagated_time"); - Self::retransmit_latest_unpropagated_leader_slot( - &poh_recorder, - &retransmit_slots_sender, - &mut progress, - ); - retransmit_not_propagated_time.stop(); - - // From this point on, its not safe to use ancestors/descendants since maybe_start_leader - // may add a bank that will not included in either of these maps. - drop(ancestors); - drop(descendants); - if !tpu_has_bank { - Self::maybe_start_leader( - &my_pubkey, + reset_bank_time.stop(); + + let mut start_leader_time = Measure::start("start_leader_time"); + let mut dump_then_repair_correct_slots_time = + Measure::start("dump_then_repair_correct_slots_time"); + // Used for correctness check + let poh_bank = shared_poh_bank.load(); + // Dump any duplicate slots that have been confirmed by the network in + // anticipation of repairing the confirmed version of the slot. + // + // Has to be before `maybe_start_leader()`. Otherwise, `ancestors` and `descendants` + // will be outdated, and we cannot assume `poh_bank` will be in either of these maps. 
+ Self::dump_then_repair_correct_slots( + &mut duplicate_slots_to_repair, + &mut ancestors, + &mut descendants, + &mut progress, &bank_forks, - &poh_recorder, + &blockstore, + poh_bank.map(|bank| bank.slot()), + &mut purge_repair_slot_counter, + &dumped_slots_sender, + &my_pubkey, &leader_schedule_cache, - rpc_subscriptions.as_deref(), - &slot_status_notifier, - &mut progress, - &retransmit_slots_sender, - &mut skipped_slots_info, - &banking_tracer, - has_new_vote_been_rooted, ); + dump_then_repair_correct_slots_time.stop(); - let poh_bank = poh_recorder.read().unwrap().bank(); - if let Some(bank) = poh_bank { - Self::log_leader_change( - &my_pubkey, - bank.slot(), - &mut current_leader, + let mut retransmit_not_propagated_time = + Measure::start("retransmit_not_propagated_time"); + Self::retransmit_latest_unpropagated_leader_slot( + &poh_recorder, + &retransmit_slots_sender, + &mut progress, + ); + retransmit_not_propagated_time.stop(); + + // From this point on, it's not safe to use ancestors/descendants since maybe_start_leader + // may add a bank that will not be included in either of these maps. 
+ drop(ancestors); + drop(descendants); + if !tpu_has_bank && !poh_controller.has_pending_message() { + if let Some(poh_slot) = Self::maybe_start_leader( &my_pubkey, - ); + &bank_forks, + &poh_recorder, + &mut poh_controller, + &leader_schedule_cache, + rpc_subscriptions.as_deref(), + &slot_status_notifier, + &mut progress, + &retransmit_slots_sender, + &mut skipped_slots_info, + &banking_tracer, + has_new_vote_been_rooted, + &first_alpenglow_slot, + &mut is_alpenglow_migration_complete, + ) { + Self::log_leader_change( + &my_pubkey, + poh_slot, + &mut current_leader, + &my_pubkey, + ); + } } + start_leader_time.stop(); + + replay_timing.update_non_alpenglow( + collect_frozen_banks_time.as_us(), + compute_bank_stats_time.as_us(), + select_vote_and_reset_forks_time.as_us(), + reset_bank_time.as_us(), + voting_time.as_us(), + select_forks_time.as_us(), + compute_slot_stats_time.as_us(), + heaviest_fork_failures_time.as_us(), + u64::from(did_complete_bank), + process_ancestor_hashes_duplicate_slots_time.as_us(), + process_duplicate_confirmed_slots_time.as_us(), + process_unfrozen_gossip_verified_vote_hashes_time.as_us(), + process_popular_pruned_forks_time.as_us(), + process_duplicate_slots_time.as_us(), + dump_then_repair_correct_slots_time.as_us(), + retransmit_not_propagated_time.as_us(), + start_leader_time.as_us(), + ); } - start_leader_time.stop(); let mut wait_receive_time = Measure::start("wait_receive_time"); if !did_complete_bank { @@ -1195,27 +1267,10 @@ impl ReplayStage { } wait_receive_time.stop(); - replay_timing.update( - collect_frozen_banks_time.as_us(), - compute_bank_stats_time.as_us(), - select_vote_and_reset_forks_time.as_us(), - start_leader_time.as_us(), - reset_bank_time.as_us(), - voting_time.as_us(), - select_forks_time.as_us(), - compute_slot_stats_time.as_us(), + replay_timing.update_common( generate_new_bank_forks_time.as_us(), replay_active_banks_time.as_us(), wait_receive_time.as_us(), - heaviest_fork_failures_time.as_us(), - 
u64::from(did_complete_bank), - process_ancestor_hashes_duplicate_slots_time.as_us(), - process_duplicate_confirmed_slots_time.as_us(), - process_unfrozen_gossip_verified_vote_hashes_time.as_us(), - process_popular_pruned_forks_time.as_us(), - process_duplicate_slots_time.as_us(), - dump_then_repair_correct_slots_time.as_us(), - retransmit_not_propagated_time.as_us(), ); } }; @@ -2072,12 +2127,13 @@ impl ReplayStage { /// - We have not landed a vote yet and the `wait_for_vote_to_start_leader` flag is set /// - We have failed the propagated check /// - /// Returns whether a new working bank was created and inserted into bank forks. + /// Returns Some new working bank slot if created and inserted into bank forks. #[allow(clippy::too_many_arguments)] fn maybe_start_leader( my_pubkey: &Pubkey, bank_forks: &Arc>, poh_recorder: &Arc>, + poh_controller: &mut PohController, leader_schedule_cache: &Arc, rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, @@ -2086,13 +2142,13 @@ impl ReplayStage { skipped_slots_info: &mut SkippedSlotsInfo, banking_tracer: &Arc, has_new_vote_been_rooted: bool, - ) -> bool { + first_alpenglow_slot: &Option, + is_alpenglow_migration_complete: &mut bool, + ) -> Option { // all the individual calls to poh_recorder.read() are designed to // increase granularity, decrease contention - assert!(!poh_recorder.read().unwrap().has_bank()); - - let (poh_slot, parent_slot) = + let (poh_slot, parent_slot) = if !(*is_alpenglow_migration_complete) { match poh_recorder.read().unwrap().reached_leader_slot(my_pubkey) { PohLeaderStatus::Reached { poh_slot, @@ -2100,9 +2156,31 @@ impl ReplayStage { } => (poh_slot, parent_slot), PohLeaderStatus::NotReached => { trace!("{my_pubkey} poh_recorder hasn't reached_leader_slot"); - return false; + return None; } - }; + } + } else { + // Migration is already complete Votor will handle the rest + return None; + }; + + // Check if migration is necessary + if let Some(first_alpenglow_slot) = 
first_alpenglow_slot { + if !(*is_alpenglow_migration_complete) && poh_slot >= *first_alpenglow_slot { + // Initiate migration + // TODO: need to keep the ticks around for parent slots in previous epoch + // because reset below will delete those ticks + info!( + "initiating alpenglow migration from maybe_start_leader() for slot {poh_slot}", + ); + Self::maybe_initiate_alpenglow_migration( + poh_recorder, + is_alpenglow_migration_complete, + ); + // Votor will handle leader blocks from now on + return None; + } + } trace!("{my_pubkey} reached_leader_slot"); @@ -2111,33 +2189,28 @@ impl ReplayStage { "Poh recorder parent slot {parent_slot} is missing from bank_forks. This \ indicates that we are in the middle of a dump and repair. Unable to start leader" ); - return false; + return None; }; assert!(parent.is_frozen()); - if !parent.has_initial_accounts_hash_verification_completed() { - info!("startup verification incomplete, so skipping my leader slot"); - return false; - } - if bank_forks.read().unwrap().get(poh_slot).is_some() { warn!("{my_pubkey} already have bank in forks at {poh_slot}?"); - return false; + return None; } trace!("{my_pubkey} poh_slot {poh_slot} parent_slot {parent_slot}"); if let Some(next_leader) = leader_schedule_cache.slot_leader_at(poh_slot, Some(&parent)) { if !has_new_vote_been_rooted { info!("Haven't landed a vote, so skipping my leader slot"); - return false; + return None; } trace!("{my_pubkey} leader {next_leader} at poh slot: {poh_slot}"); // I guess I missed my slot if next_leader != *my_pubkey { - return false; + return None; } datapoint_info!( @@ -2175,7 +2248,7 @@ impl ReplayStage { latest_unconfirmed_leader_slot, ); } - return false; + return None; } let root_slot = bank_forks.read().unwrap().root(); @@ -2203,11 +2276,15 @@ impl ReplayStage { // new()-ing of its child bank banking_tracer.hash_event(parent.slot(), &parent.last_blockhash(), &parent.hash()); - update_bank_forks_and_poh_recorder_for_new_tpu_bank(bank_forks, 
poh_recorder, tpu_bank); - true + update_bank_forks_and_poh_recorder_for_new_tpu_bank( + bank_forks, + poh_controller, + tpu_bank, + ); + Some(poh_slot) } else { error!("{my_pubkey} No next leader found"); - false + None } } @@ -2259,14 +2336,11 @@ impl ReplayStage { err: &BlockstoreProcessorError, rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, - duplicate_slots_tracker: &mut DuplicateSlotsTracker, - duplicate_confirmed_slots: &DuplicateConfirmedSlots, - epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, progress: &mut ProgressMap, - heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, purge_repair_slot_counter: &mut PurgeRepairSlotCounter, + tbft_structs: &mut Option<&mut TowerBFTStructures>, ) { // Do not remove from progress map when marking dead! Needed by // `process_duplicate_confirmed_slots()` @@ -2317,35 +2391,20 @@ impl ReplayStage { }); } - let dead_state = DeadState::new_from_state( - slot, - duplicate_slots_tracker, - duplicate_confirmed_slots, + if let Some(TowerBFTStructures { heaviest_subtree_fork_choice, - epoch_slots_frozen_slots, - ); - check_slot_agrees_with_cluster( - slot, - root, - blockstore, duplicate_slots_tracker, + duplicate_confirmed_slots, epoch_slots_frozen_slots, - heaviest_subtree_fork_choice, - duplicate_slots_to_repair, - ancestor_hashes_replay_update_sender, - purge_repair_slot_counter, - SlotStateUpdate::Dead(dead_state), - ); - - // If we previously marked this slot as duplicate in blockstore, let the state machine know - if !duplicate_slots_tracker.contains(&slot) && blockstore.get_duplicate_slot(slot).is_some() + .. 
+ }) = tbft_structs { - let duplicate_state = DuplicateState::new_from_state( + let dead_state = DeadState::new_from_state( slot, + duplicate_slots_tracker, duplicate_confirmed_slots, heaviest_subtree_fork_choice, - || true, - || None, + epoch_slots_frozen_slots, ); check_slot_agrees_with_cluster( slot, @@ -2357,8 +2416,33 @@ impl ReplayStage { duplicate_slots_to_repair, ancestor_hashes_replay_update_sender, purge_repair_slot_counter, - SlotStateUpdate::Duplicate(duplicate_state), + SlotStateUpdate::Dead(dead_state), ); + + // If we previously marked this slot as duplicate in blockstore, let the state machine know + if !duplicate_slots_tracker.contains(&slot) + && blockstore.get_duplicate_slot(slot).is_some() + { + let duplicate_state = DuplicateState::new_from_state( + slot, + duplicate_confirmed_slots, + heaviest_subtree_fork_choice, + || true, + || None, + ); + check_slot_agrees_with_cluster( + slot, + root, + blockstore, + duplicate_slots_tracker, + epoch_slots_frozen_slots, + heaviest_subtree_fork_choice, + duplicate_slots_to_repair, + ancestor_hashes_replay_update_sender, + purge_repair_slot_counter, + SlotStateUpdate::Duplicate(duplicate_state), + ); + } } } @@ -2378,18 +2462,15 @@ impl ReplayStage { snapshot_controller: Option<&SnapshotController>, rpc_subscriptions: Option<&RpcSubscriptions>, block_commitment_cache: &Arc>, - heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, bank_notification_sender: &Option, - duplicate_slots_tracker: &mut DuplicateSlotsTracker, - duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, - unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, tracked_vote_transactions: &mut Vec, has_new_vote_been_rooted: &mut bool, replay_timing: &mut ReplayLoopTiming, voting_sender: &Sender, - epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, drop_bank_sender: &Sender>, wait_to_vote_slot: Option, + first_alpenglow_slot: &mut Option, + tbft_structs: &mut TowerBFTStructures, ) -> Result<(), 
SetRootError> { if bank.is_empty() { datapoint_info!("replay_stage-voted_empty_bank", ("slot", bank.slot(), i64)); @@ -2398,8 +2479,29 @@ impl ReplayStage { let new_root = tower.record_bank_vote(bank); if let Some(new_root) = new_root { + if first_alpenglow_slot.is_none() { + if let Some(activation_slot) = bank_forks + .read() + .unwrap() + .root_bank() + .compute_pending_activation_slot(&agave_feature_set::alpenglow::id()) + { + *first_alpenglow_slot = Some(activation_slot); + info!( + "alpenglow feature detected in root bank {new_root}, to be enabled on \ + slot {activation_slot}", + ); + } + } + let highest_super_majority_root = Some( + block_commitment_cache + .read() + .unwrap() + .highest_super_majority_root(), + ); Self::check_and_handle_new_root( - bank, + &identity_keypair.pubkey(), + bank.parent_slot(), new_root, bank_forks, progress, @@ -2407,16 +2509,12 @@ impl ReplayStage { leader_schedule_cache, snapshot_controller, rpc_subscriptions, - block_commitment_cache, - heaviest_subtree_fork_choice, + highest_super_majority_root, bank_notification_sender, - duplicate_slots_tracker, - duplicate_confirmed_slots, - unfrozen_gossip_verified_vote_hashes, has_new_vote_been_rooted, tracked_vote_transactions, - epoch_slots_frozen_slots, drop_bank_sender, + tbft_structs, )?; } @@ -2475,11 +2573,6 @@ impl ReplayStage { has_new_vote_been_rooted: bool, wait_to_vote_slot: Option, ) -> GenerateVoteTxResult { - if !bank.has_initial_accounts_hash_verification_completed() { - info!("startup verification incomplete, so unable to vote"); - return GenerateVoteTxResult::Failed; - } - if authorized_voter_keypairs.is_empty() { return GenerateVoteTxResult::NonVoting; } @@ -2822,7 +2915,7 @@ impl ReplayStage { my_pubkey: &Pubkey, blockstore: &Blockstore, bank: Arc, - poh_recorder: &RwLock, + poh_controller: &mut PohController, leader_schedule_cache: &LeaderScheduleCache, ) { let slot = bank.slot(); @@ -2836,7 +2929,10 @@ impl ReplayStage { GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS, ); - 
poh_recorder.write().unwrap().reset(bank, next_leader_slot); + if poh_controller.reset(bank, next_leader_slot).is_err() { + warn!("Failed to reset poh, poh service is disconnected"); + return; + } let next_leader_msg = if let Some(next_leader_slot) = next_leader_slot { format!("My next leader slot is {}", next_leader_slot.0) @@ -2993,17 +3089,17 @@ impl ReplayStage { debug!("bank_slot {bank_slot:?} is marked dead"); replay_result.is_slot_dead = true; } else { - let bank = bank_forks - .read() - .unwrap() - .get_with_scheduler(bank_slot) - .unwrap(); + let Some(bank) = bank_forks.read().unwrap().get_with_scheduler(bank_slot) else { + info!("Abandoning replay of unrooted slot {bank_slot}"); + return replay_result; + }; let parent_slot = bank.parent_slot(); let prev_leader_slot = progress.get_bank_prev_leader_slot(&bank); let (num_blocks_on_fork, num_dropped_blocks_on_fork) = { - let stats = progress - .get(&parent_slot) - .expect("parent of active bank must exist in progress map"); + let Some(stats) = progress.get(&parent_slot) else { + info!("Abandoning replay of unrooted slot {bank_slot}"); + return replay_result; + }; let num_blocks_on_fork = stats.num_blocks_on_fork + 1; let new_dropped_blocks = bank.slot() - parent_slot - 1; let num_dropped_blocks_on_fork = @@ -3045,20 +3141,32 @@ impl ReplayStage { replay_result } + fn maybe_initiate_alpenglow_migration( + poh_recorder: &RwLock, + is_alpenglow_migration_complete: &mut bool, + ) { + if *is_alpenglow_migration_complete { + error!("Attempting to start alpenglow migration but it is already complete"); + return; + } + info!("initiating alpenglow migration"); + // This by itself does not do anything, a follow up PR will enact action to + // turn off PoH based on this flag + poh_recorder.write().unwrap().is_alpenglow_enabled = true; + + *is_alpenglow_migration_complete = true; + info!("alpenglow migration complete!"); + } + #[allow(clippy::too_many_arguments)] fn process_replay_results( blockstore: &Blockstore, 
bank_forks: &RwLock, progress: &mut ProgressMap, transaction_status_sender: Option<&TransactionStatusSender>, - heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, bank_notification_sender: &Option, rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, - duplicate_slots_tracker: &mut DuplicateSlotsTracker, - duplicate_confirmed_slots: &DuplicateConfirmedSlots, - epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, - unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, cluster_slots_update_sender: &ClusterSlotsUpdateSender, cost_update_sender: &Sender, @@ -3068,6 +3176,10 @@ impl ReplayStage { replay_result_vec: &[ReplaySlotFromBlockstore], purge_repair_slot_counter: &mut PurgeRepairSlotCounter, my_pubkey: &Pubkey, + first_alpenglow_slot: Option, + poh_recorder: &RwLock, + is_alpenglow_migration_complete: &mut bool, + mut tbft_structs: Option<&mut TowerBFTStructures>, ) -> bool { // TODO: See if processing of blockstore replay results and bank completion can be made thread safe. 
let mut did_complete_bank = false; @@ -3079,11 +3191,10 @@ impl ReplayStage { } let bank_slot = replay_result.bank_slot; - let bank = &bank_forks - .read() - .unwrap() - .get_with_scheduler(bank_slot) - .unwrap(); + let Some(bank) = &bank_forks.read().unwrap().get_with_scheduler(bank_slot) else { + info!("Abandoning replay of unrooted slot {bank_slot}"); + continue; + }; if let Some(replay_result) = &replay_result.replay_result { match replay_result { Ok(replay_tx_count) => tx_count += replay_tx_count, @@ -3096,14 +3207,11 @@ impl ReplayStage { err, rpc_subscriptions, slot_status_notifier, - duplicate_slots_tracker, - duplicate_confirmed_slots, - epoch_slots_frozen_slots, progress, - heaviest_subtree_fork_choice, duplicate_slots_to_repair, ancestor_hashes_replay_update_sender, purge_repair_slot_counter, + &mut tbft_structs, ); // don't try to run the below logic to check if the bank is completed continue; @@ -3113,6 +3221,18 @@ impl ReplayStage { assert_eq!(bank_slot, bank.slot()); if bank.is_complete() { + if let Some(first_alpenglow_slot) = first_alpenglow_slot { + if !*is_alpenglow_migration_complete && bank.slot() >= first_alpenglow_slot { + info!( + "initiating alpenglow migration from replaying bank {}", + bank.slot() + ); + Self::maybe_initiate_alpenglow_migration( + poh_recorder, + is_alpenglow_migration_complete, + ); + } + } let mut bank_complete_time = Measure::start("bank_complete_time"); let bank_progress = progress .get_mut(&bank.slot()) @@ -3145,14 +3265,11 @@ impl ReplayStage { &BlockstoreProcessorError::InvalidTransaction(err), rpc_subscriptions, slot_status_notifier, - duplicate_slots_tracker, - duplicate_confirmed_slots, - epoch_slots_frozen_slots, progress, - heaviest_subtree_fork_choice, duplicate_slots_to_repair, ancestor_hashes_replay_update_sender, purge_repair_slot_counter, + &mut tbft_structs, ); // don't try to run the remaining normal processing for the completed bank continue; @@ -3178,14 +3295,11 @@ impl ReplayStage { &result_err, 
rpc_subscriptions, slot_status_notifier, - duplicate_slots_tracker, - duplicate_confirmed_slots, - epoch_slots_frozen_slots, progress, - heaviest_subtree_fork_choice, duplicate_slots_to_repair, ancestor_hashes_replay_update_sender, purge_repair_slot_counter, + &mut tbft_structs, ); continue; } @@ -3229,45 +3343,30 @@ impl ReplayStage { }); assert_ne!(bank.hash(), Hash::default()); - // Needs to be updated before `check_slot_agrees_with_cluster()` so that - // any updates in `check_slot_agrees_with_cluster()` on fork choice take - // effect - heaviest_subtree_fork_choice.add_new_leaf_slot( - (bank.slot(), bank.hash()), - Some((bank.parent_slot(), bank.parent_hash())), - ); - heaviest_subtree_fork_choice.maybe_print_state(); bank_progress.fork_stats.bank_hash = Some(bank.hash()); - let bank_frozen_state = BankFrozenState::new_from_state( - bank.slot(), - bank.hash(), - duplicate_slots_tracker, - duplicate_confirmed_slots, + if let Some(TowerBFTStructures { heaviest_subtree_fork_choice, - epoch_slots_frozen_slots, - ); - check_slot_agrees_with_cluster( - bank.slot(), - bank_forks.read().unwrap().root(), - blockstore, duplicate_slots_tracker, + duplicate_confirmed_slots, epoch_slots_frozen_slots, - heaviest_subtree_fork_choice, - duplicate_slots_to_repair, - ancestor_hashes_replay_update_sender, - purge_repair_slot_counter, - SlotStateUpdate::BankFrozen(bank_frozen_state), - ); - // If we previously marked this slot as duplicate in blockstore, let the state machine know - if !duplicate_slots_tracker.contains(&bank.slot()) - && blockstore.get_duplicate_slot(bank.slot()).is_some() + .. 
+ }) = &mut tbft_structs { - let duplicate_state = DuplicateState::new_from_state( + // Needs to be updated before `check_slot_agrees_with_cluster()` so that + // any updates in `check_slot_agrees_with_cluster()` on fork choice take + // effect + heaviest_subtree_fork_choice.add_new_leaf_slot( + (bank.slot(), bank.hash()), + Some((bank.parent_slot(), bank.parent_hash())), + ); + heaviest_subtree_fork_choice.maybe_print_state(); + let bank_frozen_state = BankFrozenState::new_from_state( bank.slot(), + bank.hash(), + duplicate_slots_tracker, duplicate_confirmed_slots, heaviest_subtree_fork_choice, - || false, - || Some(bank.hash()), + epoch_slots_frozen_slots, ); check_slot_agrees_with_cluster( bank.slot(), @@ -3279,8 +3378,32 @@ impl ReplayStage { duplicate_slots_to_repair, ancestor_hashes_replay_update_sender, purge_repair_slot_counter, - SlotStateUpdate::Duplicate(duplicate_state), + SlotStateUpdate::BankFrozen(bank_frozen_state), ); + // If we previously marked this slot as duplicate in blockstore, let the state machine know + if !duplicate_slots_tracker.contains(&bank.slot()) + && blockstore.get_duplicate_slot(bank.slot()).is_some() + { + let duplicate_state = DuplicateState::new_from_state( + bank.slot(), + duplicate_confirmed_slots, + heaviest_subtree_fork_choice, + || false, + || Some(bank.hash()), + ); + check_slot_agrees_with_cluster( + bank.slot(), + bank_forks.read().unwrap().root(), + blockstore, + duplicate_slots_tracker, + epoch_slots_frozen_slots, + heaviest_subtree_fork_choice, + duplicate_slots_to_repair, + ancestor_hashes_replay_update_sender, + purge_repair_slot_counter, + SlotStateUpdate::Duplicate(duplicate_state), + ); + } } if let Some(sender) = bank_notification_sender { let dependency_work = sender @@ -3297,9 +3420,10 @@ impl ReplayStage { } let bank_hash = bank.hash(); - if let Some(new_frozen_voters) = - unfrozen_gossip_verified_vote_hashes.remove_slot_hash(bank.slot(), &bank_hash) - { + if let Some(new_frozen_voters) = 
tbft_structs.as_mut().and_then(|tbft| { + tbft.unfrozen_gossip_verified_vote_hashes + .remove_slot_hash(bank.slot(), &bank_hash) + }) { for pubkey in new_frozen_voters { latest_validator_votes_for_frozen_banks.check_add_vote( pubkey, @@ -3361,15 +3485,10 @@ impl ReplayStage { transaction_status_sender: Option<&TransactionStatusSender>, entry_notification_sender: Option<&EntryNotifierSender>, verify_recyclers: &VerifyRecyclers, - heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, replay_vote_sender: &ReplayVoteSender, bank_notification_sender: &Option, rpc_subscriptions: Option<&RpcSubscriptions>, slot_status_notifier: &Option, - duplicate_slots_tracker: &mut DuplicateSlotsTracker, - duplicate_confirmed_slots: &DuplicateConfirmedSlots, - epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, - unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, latest_validator_votes_for_frozen_banks: &mut LatestValidatorVotesForFrozenBanks, cluster_slots_update_sender: &ClusterSlotsUpdateSender, cost_update_sender: &Sender, @@ -3382,6 +3501,10 @@ impl ReplayStage { replay_tx_thread_pool: &ThreadPool, prioritization_fee_cache: &PrioritizationFeeCache, purge_repair_slot_counter: &mut PurgeRepairSlotCounter, + poh_recorder: &RwLock, + first_alpenglow_slot: Option, + tbft_structs: Option<&mut TowerBFTStructures>, + is_alpenglow_migration_complete: &mut bool, ) -> bool /* completed a bank */ { let active_bank_slots = bank_forks.read().unwrap().active_bank_slots(); let num_active_banks = active_bank_slots.len(); @@ -3439,14 +3562,9 @@ impl ReplayStage { bank_forks, progress, transaction_status_sender, - heaviest_subtree_fork_choice, bank_notification_sender, rpc_subscriptions, slot_status_notifier, - duplicate_slots_tracker, - duplicate_confirmed_slots, - epoch_slots_frozen_slots, - unfrozen_gossip_verified_vote_hashes, latest_validator_votes_for_frozen_banks, cluster_slots_update_sender, cost_update_sender, @@ -3456,6 +3574,10 @@ impl ReplayStage { 
&replay_result_vec, purge_repair_slot_counter, my_pubkey, + first_alpenglow_slot, + poh_recorder, + is_alpenglow_migration_complete, + tbft_structs, ) } @@ -3981,8 +4103,12 @@ impl ReplayStage { } #[allow(clippy::too_many_arguments)] + /// A wrapper around `root_utils::check_and_handle_new_root` which: + /// - calls into `root_utils::set_bank_forks_root` + /// - Executes `set_progress_and_tower_bft_root` to cleanup tower bft structs and the progress map fn check_and_handle_new_root( - vote_bank: &Bank, + my_pubkey: &Pubkey, + parent_slot: Slot, new_root: Slot, bank_forks: &RwLock, progress: &mut ProgressMap, @@ -3990,122 +4116,49 @@ impl ReplayStage { leader_schedule_cache: &Arc, snapshot_controller: Option<&SnapshotController>, rpc_subscriptions: Option<&RpcSubscriptions>, - block_commitment_cache: &Arc>, - heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, + highest_super_majority_root: Option, bank_notification_sender: &Option, - duplicate_slots_tracker: &mut DuplicateSlotsTracker, - duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, - unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, has_new_vote_been_rooted: &mut bool, tracked_vote_transactions: &mut Vec, - epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, drop_bank_sender: &Sender>, + tbft_structs: &mut TowerBFTStructures, ) -> Result<(), SetRootError> { - // get the root bank before squash - let root_bank = bank_forks - .read() - .unwrap() - .get(new_root) - .expect("Root bank doesn't exist"); - let mut rooted_banks = root_bank.parents(); - let oldest_parent = rooted_banks.last().map(|last| last.parent_slot()); - rooted_banks.push(root_bank.clone()); - let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect(); - // The following differs from rooted_slots by including the parent slot of the oldest parent bank. 
- let rooted_slots_with_parents = bank_notification_sender - .as_ref() - .is_some_and(|sender| sender.should_send_parents) - .then(|| { - let mut new_chain = rooted_slots.clone(); - new_chain.push(oldest_parent.unwrap_or_else(|| vote_bank.parent_slot())); - new_chain - }); - - // Call leader schedule_cache.set_root() before blockstore.set_root() because - // bank_forks.root is consumed by repair_service to update gossip, so we don't want to - // get shreds for repair on gossip before we update leader schedule, otherwise they may - // get dropped. - leader_schedule_cache.set_root(rooted_banks.last().unwrap()); - blockstore - .set_roots(rooted_slots.iter()) - .expect("Ledger set roots failed"); - let highest_super_majority_root = Some( - block_commitment_cache - .read() - .unwrap() - .highest_super_majority_root(), - ); - Self::handle_new_root( + root_utils::check_and_handle_new_root( + parent_slot, new_root, - bank_forks, - progress, snapshot_controller, highest_super_majority_root, - heaviest_subtree_fork_choice, - duplicate_slots_tracker, - duplicate_confirmed_slots, - unfrozen_gossip_verified_vote_hashes, - has_new_vote_been_rooted, - tracked_vote_transactions, - epoch_slots_frozen_slots, + bank_notification_sender, drop_bank_sender, - )?; - blockstore.slots_stats.mark_rooted(new_root); - if let Some(rpc_subscriptions) = rpc_subscriptions { - rpc_subscriptions.notify_roots(rooted_slots); - } - if let Some(sender) = bank_notification_sender { - let dependency_work = sender - .dependency_tracker - .as_ref() - .map(|s| s.get_current_declared_work()); - sender - .sender - .send((BankNotification::NewRootBank(root_bank), dependency_work)) - .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}")); - - if let Some(new_chain) = rooted_slots_with_parents { - sender - .sender - .send((BankNotification::NewRootedChain(new_chain), dependency_work)) - .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}")); - } - } - info!("new root 
{new_root}"); - Ok(()) + blockstore, + leader_schedule_cache, + bank_forks, + rpc_subscriptions, + my_pubkey, + move |bank_forks| { + Self::set_progress_and_tower_bft_root( + new_root, + bank_forks, + progress, + has_new_vote_been_rooted, + tracked_vote_transactions, + tbft_structs, + ) + }, + ) } - #[allow(clippy::too_many_arguments)] - pub fn handle_new_root( + // To avoid code duplication and keep compatibility with alpenglow, we add this + // extra callback in the rooting path. This happens immediately after setting the bank forks root + fn set_progress_and_tower_bft_root( new_root: Slot, - bank_forks: &RwLock, + bank_forks: &BankForks, progress: &mut ProgressMap, - snapshot_controller: Option<&SnapshotController>, - highest_super_majority_root: Option, - heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice, - duplicate_slots_tracker: &mut DuplicateSlotsTracker, - duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, - unfrozen_gossip_verified_vote_hashes: &mut UnfrozenGossipVerifiedVoteHashes, has_new_vote_been_rooted: &mut bool, tracked_vote_transactions: &mut Vec, - epoch_slots_frozen_slots: &mut EpochSlotsFrozenSlots, - drop_bank_sender: &Sender>, - ) -> Result<(), SetRootError> { - bank_forks.read().unwrap().prune_program_cache(new_root); - let removed_banks = bank_forks.write().unwrap().set_root( - new_root, - snapshot_controller, - highest_super_majority_root, - )?; - - drop_bank_sender - .send(removed_banks) - .unwrap_or_else(|err| warn!("bank drop failed: {err:?}")); - - // Dropping the bank_forks write lock and reacquiring as a read lock is - // safe because updates to bank_forks are only made by a single thread. 
- let r_bank_forks = bank_forks.read().unwrap(); - let new_root_bank = &r_bank_forks[new_root]; + tbft_structs: &mut TowerBFTStructures, + ) { + let new_root_bank = &bank_forks[new_root]; if !*has_new_vote_been_rooted { for TrackedVoteTransaction { message_hash, @@ -4124,8 +4177,17 @@ impl ReplayStage { std::mem::take(tracked_vote_transactions); } } - progress.handle_new_root(&r_bank_forks); - heaviest_subtree_fork_choice.set_tree_root((new_root, r_bank_forks.root_bank().hash())); + + progress.handle_new_root(bank_forks); + let TowerBFTStructures { + heaviest_subtree_fork_choice, + duplicate_slots_tracker, + duplicate_confirmed_slots, + unfrozen_gossip_verified_vote_hashes, + epoch_slots_frozen_slots, + .. + } = tbft_structs; + heaviest_subtree_fork_choice.set_tree_root((new_root, bank_forks.root_bank().hash())); *duplicate_slots_tracker = duplicate_slots_tracker.split_off(&new_root); // duplicate_slots_tracker now only contains entries >= `new_root` @@ -4134,8 +4196,39 @@ impl ReplayStage { unfrozen_gossip_verified_vote_hashes.set_root(new_root); *epoch_slots_frozen_slots = epoch_slots_frozen_slots.split_off(&new_root); + // epoch_slots_frozen_slots now only contains entries >= `new_root` + } + + #[allow(clippy::too_many_arguments)] + pub fn handle_new_root( + new_root: Slot, + bank_forks: &RwLock, + progress: &mut ProgressMap, + snapshot_controller: Option<&SnapshotController>, + highest_super_majority_root: Option, + has_new_vote_been_rooted: &mut bool, + tracked_vote_transactions: &mut Vec, + drop_bank_sender: &Sender>, + tbft_structs: &mut TowerBFTStructures, + ) -> Result<(), SetRootError> { + root_utils::set_bank_forks_root( + new_root, + bank_forks, + snapshot_controller, + highest_super_majority_root, + drop_bank_sender, + move |bank_forks| { + Self::set_progress_and_tower_bft_root( + new_root, + bank_forks, + progress, + has_new_vote_been_rooted, + tracked_vote_transactions, + tbft_structs, + ) + }, + )?; Ok(()) - // epoch_slots_frozen_slots now only 
contains entries >= `new_root` } fn generate_new_bank_forks( @@ -4202,6 +4295,7 @@ impl ReplayStage { slot_status_notifier, NewBankOptions::default(), ); + blockstore_processor::set_alpenglow_ticks(&child_bank); let empty: Vec = vec![]; Self::update_fork_propagated_threshold_from_votes( progress, @@ -4218,9 +4312,18 @@ impl ReplayStage { let mut generate_new_bank_forks_write_lock = Measure::start("generate_new_bank_forks_write_lock"); - let mut forks = bank_forks.write().unwrap(); - for (_, bank) in new_banks { - forks.insert(bank); + if !new_banks.is_empty() { + let mut forks = bank_forks.write().unwrap(); + let root = forks.root(); + for (slot, bank) in new_banks { + if slot < root { + continue; + } + if forks.get(bank.parent_slot()).is_none() { + continue; + } + forks.insert(bank); + } } generate_new_bank_forks_write_lock.stop(); replay_timing.generate_new_bank_forks_read_lock_us += @@ -4354,6 +4457,7 @@ pub(crate) mod tests { get_tmp_ledger_path, get_tmp_ledger_path_auto_delete, shred::{ProcessShredsStats, ReedSolomonCache, Shred, Shredder}, }, + solana_poh::poh_recorder::create_test_recorder, solana_poh_config::PohConfig, solana_rpc::{ optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, @@ -4419,7 +4523,8 @@ pub(crate) mod tests { pub(crate) my_pubkey: Pubkey, cluster_info: ClusterInfo, pub(crate) leader_schedule_cache: Arc, - poh_recorder: RwLock, + poh_recorder: Arc>, + poh_controller: PohController, tower: Tower, rpc_subscriptions: Arc, pub vote_simulator: VoteSimulator, @@ -4468,22 +4573,14 @@ pub(crate) mod tests { let root_bank = bank_forks.read().unwrap().root_bank(); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&root_bank)); - // PohRecorder let working_bank = bank_forks.read().unwrap().working_bank(); - let poh_recorder = RwLock::new( - PohRecorder::new( - working_bank.tick_height(), - working_bank.last_blockhash(), - working_bank.clone(), - None, - working_bank.ticks_per_slot(), + let (_exit, poh_recorder, 
poh_controller, _transaction_recorder, _poh_service, _) = + create_test_recorder( + working_bank, blockstore.clone(), - &leader_schedule_cache, - &PohConfig::default(), - Arc::new(AtomicBool::new(false)), - ) - .0, - ); + Some(PohConfig::default()), + Some(leader_schedule_cache.clone()), + ); // Tower let my_vote_pubkey = my_keypairs.vote_keypair.pubkey(); @@ -4513,6 +4610,7 @@ pub(crate) mod tests { cluster_info, leader_schedule_cache, poh_recorder, + poh_controller, tower, rpc_subscriptions, vote_simulator, @@ -4655,45 +4753,48 @@ pub(crate) mod tests { let root_hash = root_bank.hash(); bank_forks.write().unwrap().insert(root_bank); - let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash)); + let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash)); let mut progress = ProgressMap::default(); for i in 0..=root { progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0)); } - let mut duplicate_slots_tracker: DuplicateSlotsTracker = + let duplicate_slots_tracker: DuplicateSlotsTracker = vec![root - 1, root, root + 1].into_iter().collect(); - let mut duplicate_confirmed_slots: DuplicateConfirmedSlots = vec![root - 1, root, root + 1] + let duplicate_confirmed_slots: DuplicateConfirmedSlots = vec![root - 1, root, root + 1] .into_iter() .map(|s| (s, Hash::default())) .collect(); - let mut unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes = + let unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes = UnfrozenGossipVerifiedVoteHashes { votes_per_slot: vec![root - 1, root, root + 1] .into_iter() .map(|s| (s, HashMap::new())) .collect(), }; - let mut epoch_slots_frozen_slots: EpochSlotsFrozenSlots = vec![root - 1, root, root + 1] + let epoch_slots_frozen_slots: EpochSlotsFrozenSlots = vec![root - 1, root, root + 1] .into_iter() .map(|slot| (slot, Hash::default())) .collect(); let (drop_bank_sender, _drop_bank_receiver) = unbounded(); + let mut tbft_structs = 
TowerBFTStructures { + heaviest_subtree_fork_choice, + duplicate_slots_tracker, + duplicate_confirmed_slots, + unfrozen_gossip_verified_vote_hashes, + epoch_slots_frozen_slots, + }; ReplayStage::handle_new_root( root, &bank_forks, &mut progress, None, // snapshot_controller None, - &mut heaviest_subtree_fork_choice, - &mut duplicate_slots_tracker, - &mut duplicate_confirmed_slots, - &mut unfrozen_gossip_verified_vote_hashes, &mut true, &mut Vec::new(), - &mut epoch_slots_frozen_slots, &drop_bank_sender, + &mut tbft_structs, ) .unwrap(); assert_eq!(bank_forks.read().unwrap().root(), root); @@ -4701,18 +4802,23 @@ pub(crate) mod tests { assert!(progress.get(&root).is_some()); // root - 1 is filtered out assert_eq!( - duplicate_slots_tracker.into_iter().collect::>(), + tbft_structs + .duplicate_slots_tracker + .into_iter() + .collect::>(), vec![root, root + 1] ); assert_eq!( - duplicate_confirmed_slots + tbft_structs + .duplicate_confirmed_slots .keys() .cloned() .collect::>(), vec![root, root + 1] ); assert_eq!( - unfrozen_gossip_verified_vote_hashes + tbft_structs + .unfrozen_gossip_verified_vote_hashes .votes_per_slot .keys() .cloned() @@ -4720,7 +4826,10 @@ pub(crate) mod tests { vec![root, root + 1] ); assert_eq!( - epoch_slots_frozen_slots.into_keys().collect::>(), + tbft_structs + .epoch_slots_frozen_slots + .into_keys() + .collect::>(), vec![root, root + 1] ); } @@ -4753,7 +4862,7 @@ pub(crate) mod tests { root_bank.freeze(); let root_hash = root_bank.hash(); bank_forks.write().unwrap().insert(root_bank); - let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash)); + let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash)); let mut progress = ProgressMap::default(); for i in 0..=root { progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0)); @@ -4765,14 +4874,16 @@ pub(crate) mod tests { &mut progress, None, // snapshot_controller Some(confirmed_root), - &mut 
heaviest_subtree_fork_choice, - &mut DuplicateSlotsTracker::default(), - &mut DuplicateConfirmedSlots::default(), - &mut UnfrozenGossipVerifiedVoteHashes::default(), &mut true, &mut Vec::new(), - &mut EpochSlotsFrozenSlots::default(), &drop_bank_sender, + &mut TowerBFTStructures { + heaviest_subtree_fork_choice, + duplicate_slots_tracker: DuplicateSlotsTracker::default(), + duplicate_confirmed_slots: DuplicateConfirmedSlots::default(), + unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes::default(), + epoch_slots_frozen_slots: EpochSlotsFrozenSlots::default(), + }, ) .unwrap(); assert_eq!(bank_forks.read().unwrap().root(), root); @@ -4995,7 +5106,7 @@ pub(crate) mod tests { &keypair, &gibberish, true, - Some(Hash::default()), + Hash::default(), 0, 0, &reed_solomon_cache, @@ -5058,7 +5169,7 @@ pub(crate) mod tests { let VoteSimulator { mut progress, bank_forks, - mut heaviest_subtree_fork_choice, + mut tbft_structs, validator_keypairs, .. } = vote_simulator; @@ -5123,14 +5234,11 @@ pub(crate) mod tests { err, rpc_subscriptions.as_deref(), &slot_status_notifier, - &mut DuplicateSlotsTracker::default(), - &DuplicateConfirmedSlots::new(), - &mut EpochSlotsFrozenSlots::default(), &mut progress, - &mut heaviest_subtree_fork_choice, &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, &mut PurgeRepairSlotCounter::default(), + &mut Some(&mut tbft_structs), ); } assert!(dead_slots.lock().unwrap().contains(&bank1.slot())); @@ -5154,7 +5262,7 @@ pub(crate) mod tests { let mut leader_vote_account = bank.get_account(pubkey).unwrap(); let mut vote_state = vote_state::from(&leader_vote_account).unwrap(); vote_state::process_slot_vote_unchecked(&mut vote_state, vote_slot); - let versioned = VoteStateVersions::new_current(vote_state.clone()); + let versioned = VoteStateVersions::new_v3(vote_state.clone()); vote_state::to(&versioned, &mut leader_vote_account).unwrap(); bank.store_account(pubkey, &leader_vote_account); (*pubkey, 
TowerVoteState::from(vote_state)) @@ -5284,7 +5392,7 @@ pub(crate) mod tests { mut genesis_config, mint_keypair, .. - } = create_genesis_config(solana_native_token::sol_to_lamports(1000.0)); + } = create_genesis_config(solana_native_token::LAMPORTS_PER_SOL * 1000); genesis_config.rent.lamports_per_byte_year = 50; genesis_config.rent.exemption_threshold = 2.0; let (ledger_path, _) = create_new_tmp_ledger!(&genesis_config); @@ -5388,7 +5496,7 @@ pub(crate) mod tests { &mut tower, &mut progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &bank_forks, &mut heaviest_subtree_fork_choice, &mut latest_validator_votes_for_frozen_banks, @@ -5439,7 +5547,7 @@ pub(crate) mod tests { &mut tower, &mut progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &bank_forks, &mut heaviest_subtree_fork_choice, &mut latest_validator_votes_for_frozen_banks, @@ -5474,7 +5582,7 @@ pub(crate) mod tests { &mut tower, &mut progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &bank_forks, &mut heaviest_subtree_fork_choice, &mut latest_validator_votes_for_frozen_banks, @@ -5499,7 +5607,8 @@ pub(crate) mod tests { .frozen_banks() .map(|(_slot, bank)| bank) .collect(); - let heaviest_subtree_fork_choice = &mut vote_simulator.heaviest_subtree_fork_choice; + let heaviest_subtree_fork_choice = + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice; let mut latest_validator_votes_for_frozen_banks = LatestValidatorVotesForFrozenBanks::default(); let ancestors = vote_simulator.bank_forks.read().unwrap().ancestors(); @@ -5512,7 +5621,7 @@ pub(crate) mod tests { &mut tower, &mut vote_simulator.progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &vote_simulator.bank_forks, heaviest_subtree_fork_choice, &mut latest_validator_votes_for_frozen_banks, @@ -5580,9 +5689,9 @@ pub(crate) mod tests { &mut tower, &mut 
vote_simulator.progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &vote_simulator.bank_forks, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, ); @@ -5604,6 +5713,7 @@ pub(crate) mod tests { // The only leaf should always be chosen over parents assert_eq!( vote_simulator + .tbft_structs .heaviest_subtree_fork_choice .best_slot(&(bank.slot(), bank.hash())) .unwrap() @@ -5870,7 +5980,7 @@ pub(crate) mod tests { 10, &bank_forks_arc, &vote_tracker, - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), ); let propagated_stats = &progress_map.get(&10).unwrap().propagated_stats; @@ -5964,7 +6074,7 @@ pub(crate) mod tests { 10, &bank_forks_arc, &vote_tracker, - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), ); for i in 1..=10 { @@ -6051,7 +6161,7 @@ pub(crate) mod tests { 10, &bank_forks_arc, &vote_tracker, - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), ); // Only the first 5 banks should have reached the threshold @@ -6704,7 +6814,7 @@ pub(crate) mod tests { &mut tower, &mut progress, &vote_tracker, - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &bank_forks, &mut HeaviestSubtreeForkChoice::new_from_bank_forks(bank_forks.clone()), &mut LatestValidatorVotesForFrozenBanks::default(), @@ -6752,7 +6862,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -6768,7 +6878,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -6786,7 
+6896,7 @@ pub(crate) mod tests { let duplicate_state = DuplicateState::new_from_state( 5, &duplicate_confirmed_slots, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, || progress.is_dead(5).unwrap_or(false), || Some(bank5_hash), ); @@ -6798,7 +6908,7 @@ pub(crate) mod tests { &blockstore, &mut duplicate_slots_tracker, &mut epoch_slots_frozen_slots, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, &mut purge_repair_slot_counter, @@ -6812,7 +6922,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -6839,7 +6949,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -6867,7 +6977,7 @@ pub(crate) mod tests { &blockstore, &mut duplicate_slots_tracker, &mut epoch_slots_frozen_slots, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut duplicate_slots_to_repair, &ancestor_hashes_replay_update_sender, &mut purge_repair_slot_counter, @@ -6886,7 +6996,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -6904,6 +7014,7 @@ pub(crate) mod tests { // last vote which was previously marked as invalid and now duplicate confirmed let bank6_hash = bank_forks.read().unwrap().bank_hash(6).unwrap(); let _ = 
vote_simulator + .tbft_structs .heaviest_subtree_fork_choice .split_off(&(6, bank6_hash)); // Should now pick 5 as the heaviest fork from last vote again. @@ -6911,7 +7022,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -6970,7 +7081,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -6991,7 +7102,7 @@ pub(crate) mod tests { let duplicate_state = DuplicateState::new_from_state( 4, &duplicate_confirmed_slots, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, || progress.is_dead(4).unwrap_or(false), || Some(bank4_hash), ); @@ -7003,7 +7114,7 @@ pub(crate) mod tests { &blockstore, &mut duplicate_slots_tracker, &mut epoch_slots_frozen_slots, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, &mut PurgeRepairSlotCounter::default(), @@ -7014,7 +7125,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -7028,7 +7139,7 @@ pub(crate) mod tests { let duplicate_state = DuplicateState::new_from_state( 2, &duplicate_confirmed_slots, - &vote_simulator.heaviest_subtree_fork_choice, + &vote_simulator.tbft_structs.heaviest_subtree_fork_choice, || progress.is_dead(2).unwrap_or(false), || Some(bank2_hash), ); @@ -7038,7 +7149,7 @@ pub(crate) mod tests { &blockstore, &mut 
duplicate_slots_tracker, &mut epoch_slots_frozen_slots, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, &mut PurgeRepairSlotCounter::default(), @@ -7049,7 +7160,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -7074,7 +7185,7 @@ pub(crate) mod tests { &blockstore, &mut duplicate_slots_tracker, &mut epoch_slots_frozen_slots, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut duplicate_slots_to_repair, &ancestor_hashes_replay_update_sender, &mut PurgeRepairSlotCounter::default(), @@ -7089,7 +7200,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, None, ); @@ -7208,7 +7319,7 @@ pub(crate) mod tests { let VoteSimulator { ref mut progress, ref bank_forks, - ref mut heaviest_subtree_fork_choice, + ref mut tbft_structs, .. } = vote_simulator; @@ -7238,7 +7349,7 @@ pub(crate) mod tests { blockstore, &mut duplicate_slots_tracker, &mut epoch_slots_frozen_slots, - heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut duplicate_slots_to_repair, &ancestor_hashes_replay_update_sender, &mut PurgeRepairSlotCounter::default(), @@ -7296,7 +7407,7 @@ pub(crate) mod tests { let VoteSimulator { mut progress, bank_forks, - mut heaviest_subtree_fork_choice, + mut tbft_structs, mut latest_validator_votes_for_frozen_banks, .. 
} = vote_simulator; @@ -7318,14 +7429,15 @@ pub(crate) mod tests { &mut tower, &mut progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &bank_forks, - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut latest_validator_votes_for_frozen_banks, ); // Try to switch to vote to the heaviest slot 6, then return the vote results - let (heaviest_bank, heaviest_bank_on_same_fork) = heaviest_subtree_fork_choice + let (heaviest_bank, heaviest_bank_on_same_fork) = tbft_structs + .heaviest_subtree_fork_choice .select_forks(&frozen_banks, &tower, &progress, &ancestors, &bank_forks); assert_eq!(heaviest_bank.slot(), 7); assert!(heaviest_bank_on_same_fork.is_none()); @@ -7337,7 +7449,7 @@ pub(crate) mod tests { &progress, &mut tower, &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, + &tbft_structs.heaviest_subtree_fork_choice, ) } @@ -7411,14 +7523,14 @@ pub(crate) mod tests { let VoteSimulator { mut progress, bank_forks, - mut heaviest_subtree_fork_choice, + mut tbft_structs, mut latest_validator_votes_for_frozen_banks, .. } = vote_simulator; // Check that the new branch with slot 2 is different than the original version. 
let bank_1_hash = bank_forks.read().unwrap().bank_hash(1).unwrap(); - let children_of_1 = (&heaviest_subtree_fork_choice) + let children_of_1 = (&tbft_structs.heaviest_subtree_fork_choice) .children(&(1, bank_1_hash)) .unwrap(); let duplicate_versions_of_2 = children_of_1.filter(|(slot, _hash)| *slot == 2).count(); @@ -7441,13 +7553,14 @@ pub(crate) mod tests { &mut tower, &mut progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &bank_forks, - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut latest_validator_votes_for_frozen_banks, ); // Try to switch to vote to the heaviest slot 5, then return the vote results - let (heaviest_bank, heaviest_bank_on_same_fork) = heaviest_subtree_fork_choice + let (heaviest_bank, heaviest_bank_on_same_fork) = tbft_structs + .heaviest_subtree_fork_choice .select_forks(&frozen_banks, &tower, &progress, &ancestors, &bank_forks); assert_eq!(heaviest_bank.slot(), 5); assert!(heaviest_bank_on_same_fork.is_none()); @@ -7459,7 +7572,7 @@ pub(crate) mod tests { &progress, &mut tower, &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, + &tbft_structs.heaviest_subtree_fork_choice, ) } @@ -7524,7 +7637,7 @@ pub(crate) mod tests { let ( VoteSimulator { bank_forks, - mut heaviest_subtree_fork_choice, + mut tbft_structs, mut latest_validator_votes_for_frozen_banks, vote_pubkeys, .. 
@@ -7537,7 +7650,13 @@ pub(crate) mod tests { let (gossip_verified_vote_hash_sender, gossip_verified_vote_hash_receiver) = unbounded(); // Best slot is 4 - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4); + assert_eq!( + tbft_structs + .heaviest_subtree_fork_choice + .best_overall_slot() + .0, + 4 + ); // Cast a vote for slot 3 on one fork let vote_slot = 3; @@ -7548,19 +7667,27 @@ pub(crate) mod tests { ReplayStage::process_gossip_verified_vote_hashes( &gossip_verified_vote_hash_receiver, &mut unfrozen_gossip_verified_vote_hashes, - &heaviest_subtree_fork_choice, + &tbft_structs.heaviest_subtree_fork_choice, &mut latest_validator_votes_for_frozen_banks, ); // Pick the best fork. Gossip votes shouldn't affect fork choice - heaviest_subtree_fork_choice.compute_bank_stats( - &vote_bank, - &Tower::default(), - &mut latest_validator_votes_for_frozen_banks, - ); + tbft_structs + .heaviest_subtree_fork_choice + .compute_bank_stats( + &vote_bank, + &Tower::default(), + &mut latest_validator_votes_for_frozen_banks, + ); // Best slot is still 4 - assert_eq!(heaviest_subtree_fork_choice.best_overall_slot().0, 4); + assert_eq!( + tbft_structs + .heaviest_subtree_fork_choice + .best_overall_slot() + .0, + 4 + ); } #[test] @@ -7607,8 +7734,6 @@ pub(crate) mod tests { ), ); - bank0.set_initial_accounts_hash_verification_completed(); - let (voting_sender, voting_receiver) = unbounded(); // Simulate landing a vote for slot 0 landing in slot 1 @@ -7649,7 +7774,7 @@ pub(crate) mod tests { .unwrap(); let connection_cache = if DEFAULT_VOTE_USE_QUIC { - ConnectionCache::new_quic( + ConnectionCache::new_quic_for_tests( "connection_cache_vote_quic", DEFAULT_TPU_CONNECTION_POOL_SIZE, ) @@ -7754,7 +7879,7 @@ pub(crate) mod tests { .unwrap(); let connection_cache = if DEFAULT_VOTE_USE_QUIC { - ConnectionCache::new_quic( + ConnectionCache::new_quic_for_tests( "connection_cache_vote_quic", DEFAULT_TPU_CONNECTION_POOL_SIZE, ) @@ -7882,7 +8007,7 @@ pub(crate) mod tests { 
.recv_timeout(Duration::from_secs(1)) .unwrap(); let connection_cache = if DEFAULT_VOTE_USE_QUIC { - ConnectionCache::new_quic( + ConnectionCache::new_quic_for_tests( "connection_cache_vote_quic", DEFAULT_TPU_CONNECTION_POOL_SIZE, ) @@ -8024,7 +8149,7 @@ pub(crate) mod tests { .recv_timeout(Duration::from_secs(1)) .unwrap(); let connection_cache = if DEFAULT_VOTE_USE_QUIC { - ConnectionCache::new_quic( + ConnectionCache::new_quic_for_tests( "connection_cache_vote_quic", DEFAULT_TPU_CONNECTION_POOL_SIZE, ) @@ -8095,7 +8220,7 @@ pub(crate) mod tests { let VoteSimulator { mut validator_keypairs, bank_forks, - mut heaviest_subtree_fork_choice, + mut tbft_structs, mut latest_validator_votes_for_frozen_banks, mut progress, .. @@ -8111,8 +8236,6 @@ pub(crate) mod tests { let my_vote_pubkey = my_vote_keypair[0].pubkey(); let bank0 = bank_forks.read().unwrap().get(0).unwrap(); - bank0.set_initial_accounts_hash_verification_completed(); - // Add a new fork starting from 0 with bigger slot number, we assume it has a bigger // weight, but we cannot switch because of lockout. 
let other_fork_slot = 1; @@ -8214,9 +8337,9 @@ pub(crate) mod tests { &mut tower, &mut progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &bank_forks, - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut latest_validator_votes_for_frozen_banks, ); assert_eq!(tower.last_voted_slot(), Some(last_voted_slot)); @@ -8230,7 +8353,7 @@ pub(crate) mod tests { &progress, &mut tower, &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, + &tbft_structs.heaviest_subtree_fork_choice, ); assert!(vote_bank.is_some()); assert_eq!(vote_bank.unwrap().0.slot(), tip_of_voted_fork); @@ -8246,7 +8369,7 @@ pub(crate) mod tests { &progress, &mut tower, &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, + &tbft_structs.heaviest_subtree_fork_choice, ); assert!(vote_bank.is_none()); @@ -8261,7 +8384,7 @@ pub(crate) mod tests { &progress, &mut tower, &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, + &tbft_structs.heaviest_subtree_fork_choice, ); assert!(vote_bank.is_none()); @@ -8278,7 +8401,7 @@ pub(crate) mod tests { &progress, &mut tower, &latest_validator_votes_for_frozen_banks, - &heaviest_subtree_fork_choice, + &tbft_structs.heaviest_subtree_fork_choice, ); assert!(vote_bank.is_none()); } @@ -8549,6 +8672,7 @@ pub(crate) mod tests { validator_node_to_vote_keys, leader_schedule_cache, poh_recorder, + mut poh_controller, vote_simulator, rpc_subscriptions, ref my_pubkey, @@ -8599,10 +8723,9 @@ pub(crate) mod tests { .expect("Just inserted"); progress.get_retransmit_info_mut(0).unwrap().retry_time = Instant::now(); - poh_recorder - .write() - .unwrap() - .reset(bank_to_dump, Some((slot_to_dump + 1, slot_to_dump + 1))); + poh_controller + .reset_sync(bank_to_dump, Some((slot_to_dump + 1, slot_to_dump + 1))) + .unwrap(); assert_eq!(poh_recorder.read().unwrap().start_slot(), slot_to_dump); // Now dump and repair slot_to_dump @@ 
-8648,10 +8771,11 @@ pub(crate) mod tests { let rpc_subscriptions = Some(rpc_subscriptions); - assert!(!ReplayStage::maybe_start_leader( + assert!(ReplayStage::maybe_start_leader( my_pubkey, bank_forks, &poh_recorder, + &mut poh_controller, &leader_schedule_cache, rpc_subscriptions.as_deref(), &None, @@ -8660,7 +8784,10 @@ pub(crate) mod tests { &mut SkippedSlotsInfo::default(), &banking_tracer, has_new_vote_been_rooted, - )); + &None, + &mut false, + ) + .is_none()); } #[test] @@ -8734,7 +8861,7 @@ pub(crate) mod tests { tower, progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), bank_forks, heaviest_subtree_fork_choice, latest_validator_votes_for_frozen_banks, @@ -8815,6 +8942,13 @@ pub(crate) mod tests { map1.len() == map2.len() && map1.iter().all(|(k, v)| map2.get(k).unwrap() == v) } + // test-helper to wait for poh service to pick up and handle poh controller messages + fn wait_for_poh_service(poh_controller: &PohController) { + while poh_controller.has_pending_message() { + std::hint::spin_loop(); + } + } + #[test] fn test_check_for_vote_only_mode() { let in_vote_only_mode = AtomicBool::new(false); @@ -8873,7 +9007,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, Some(my_vote_pubkey), ); @@ -8892,7 +9026,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, Some(my_vote_pubkey), ); @@ -8956,7 +9090,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut 
vote_simulator.latest_validator_votes_for_frozen_banks, Some(my_vote_pubkey), ); @@ -8969,7 +9103,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut tower, - &mut vote_simulator.heaviest_subtree_fork_choice, + &mut vote_simulator.tbft_structs.heaviest_subtree_fork_choice, &mut vote_simulator.latest_validator_votes_for_frozen_banks, Some(my_vote_pubkey), ); @@ -9245,6 +9379,7 @@ pub(crate) mod tests { my_pubkey, leader_schedule_cache, poh_recorder, + mut poh_controller, vote_simulator, rpc_subscriptions, .. @@ -9258,8 +9393,6 @@ pub(crate) mod tests { let working_bank = bank_forks.read().unwrap().working_bank(); assert!(working_bank.is_complete()); assert!(working_bank.is_frozen()); - // Mark startup verification as complete to avoid skipping leader slots - working_bank.set_initial_accounts_hash_verification_completed(); // Insert a block two slots greater than current bank. This slot does // not have a corresponding Bank in BankForks; this emulates a scenario @@ -9277,9 +9410,10 @@ pub(crate) mod tests { &my_pubkey, &blockstore, working_bank.clone(), - &poh_recorder, + &mut poh_controller, &leader_schedule_cache, ); + wait_for_poh_service(&poh_controller); // Register just over one slot worth of ticks directly with PoH recorder let num_poh_ticks = @@ -9307,10 +9441,11 @@ pub(crate) mod tests { poh_recorder.read().unwrap().reached_leader_slot(&my_pubkey), PohLeaderStatus::NotReached ); - assert!(!ReplayStage::maybe_start_leader( + assert!(ReplayStage::maybe_start_leader( &my_pubkey, &bank_forks, &poh_recorder, + &mut poh_controller, &leader_schedule_cache, rpc_subscriptions.as_deref(), &None, @@ -9319,7 +9454,10 @@ pub(crate) mod tests { &mut SkippedSlotsInfo::default(), &banking_tracer, has_new_vote_been_rooted, - )); + &None, + &mut false, + ) + .is_none()); // Register another slots worth of ticks with PoH recorder poh_recorder @@ -9337,6 +9475,7 @@ pub(crate) mod tests { &my_pubkey, &bank_forks, &poh_recorder, + &mut poh_controller, 
&leader_schedule_cache, rpc_subscriptions.as_deref(), &None, @@ -9345,7 +9484,12 @@ pub(crate) mod tests { &mut SkippedSlotsInfo::default(), &banking_tracer, has_new_vote_been_rooted, - )); + &None, + &mut false, + ) + .is_some()); + wait_for_poh_service(&poh_controller); + // Get the new working bank, which is also the new leader bank/slot let working_bank = bank_forks.read().unwrap().working_bank(); // The new bank's slot must NOT be dummy_slot as the blockstore already @@ -9370,7 +9514,7 @@ pub(crate) mod tests { setup_forks_from_tree(tree, 3, Some(Box::new(generate_votes))); let VoteSimulator { bank_forks, - mut heaviest_subtree_fork_choice, + mut tbft_structs, mut progress, .. } = vote_simulator; @@ -9388,7 +9532,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut DuplicateSlotsTracker::default(), - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut EpochSlotsFrozenSlots::default(), &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, @@ -9408,7 +9552,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut DuplicateSlotsTracker::default(), - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut EpochSlotsFrozenSlots::default(), &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, @@ -9417,7 +9561,8 @@ pub(crate) mod tests { ); assert_eq!(*duplicate_confirmed_slots.get(&5).unwrap(), bank_hash_5); - assert!(heaviest_subtree_fork_choice + assert!(tbft_structs + .heaviest_subtree_fork_choice .is_duplicate_confirmed(&(5, bank_hash_5)) .unwrap_or(false)); @@ -9431,7 +9576,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut DuplicateSlotsTracker::default(), - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut EpochSlotsFrozenSlots::default(), &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, @@ -9440,11 +9585,13 @@ pub(crate) mod tests { ); 
assert_eq!(*duplicate_confirmed_slots.get(&5).unwrap(), bank_hash_5); - assert!(heaviest_subtree_fork_choice + assert!(tbft_structs + .heaviest_subtree_fork_choice .is_duplicate_confirmed(&(5, bank_hash_5)) .unwrap_or(false)); assert_eq!(*duplicate_confirmed_slots.get(&6).unwrap(), bank_hash_6); - assert!(heaviest_subtree_fork_choice + assert!(tbft_structs + .heaviest_subtree_fork_choice .is_duplicate_confirmed(&(6, bank_hash_6)) .unwrap_or(false)); @@ -9456,7 +9603,7 @@ pub(crate) mod tests { &bank_forks, &mut progress, &mut DuplicateSlotsTracker::default(), - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut EpochSlotsFrozenSlots::default(), &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, @@ -9480,7 +9627,7 @@ pub(crate) mod tests { setup_forks_from_tree(tree, 3, Some(Box::new(generate_votes))); let VoteSimulator { bank_forks, - mut heaviest_subtree_fork_choice, + mut tbft_structs, progress, .. } = vote_simulator; @@ -9502,7 +9649,7 @@ pub(crate) mod tests { &mut EpochSlotsFrozenSlots::default(), &bank_forks, &progress, - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, &mut PurgeRepairSlotCounter::default(), @@ -9522,14 +9669,15 @@ pub(crate) mod tests { &mut EpochSlotsFrozenSlots::default(), &bank_forks, &progress, - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, &mut PurgeRepairSlotCounter::default(), ); assert_eq!(*duplicate_confirmed_slots.get(&5).unwrap(), bank_hash_5); - assert!(heaviest_subtree_fork_choice + assert!(tbft_structs + .heaviest_subtree_fork_choice .is_duplicate_confirmed(&(5, bank_hash_5)) .unwrap_or(false)); @@ -9552,18 +9700,20 @@ pub(crate) mod tests { &mut EpochSlotsFrozenSlots::default(), &bank_forks, &progress, - &mut 
heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, &mut PurgeRepairSlotCounter::default(), ); assert_eq!(*duplicate_confirmed_slots.get(&5).unwrap(), bank_hash_5); - assert!(heaviest_subtree_fork_choice + assert!(tbft_structs + .heaviest_subtree_fork_choice .is_duplicate_confirmed(&(5, bank_hash_5)) .unwrap_or(false)); assert_eq!(*duplicate_confirmed_slots.get(&6).unwrap(), bank_hash_6); - assert!(heaviest_subtree_fork_choice + assert!(tbft_structs + .heaviest_subtree_fork_choice .is_duplicate_confirmed(&(6, bank_hash_6)) .unwrap_or(false)); @@ -9578,10 +9728,250 @@ pub(crate) mod tests { &mut EpochSlotsFrozenSlots::default(), &bank_forks, &progress, - &mut heaviest_subtree_fork_choice, + &mut tbft_structs.heaviest_subtree_fork_choice, + &mut DuplicateSlotsToRepair::default(), + &ancestor_hashes_replay_update_sender, + &mut PurgeRepairSlotCounter::default(), + ); + } + + #[test] + fn test_alpenglow_poh_migration_from_leader() { + solana_logger::setup(); + + let ReplayBlockstoreComponents { + blockstore, + my_pubkey, + leader_schedule_cache, + poh_recorder, + mut poh_controller, + vote_simulator, + rpc_subscriptions, + .. + } = replay_blockstore_components(None, 1, None); + let VoteSimulator { + bank_forks, + mut progress, + .. 
+ } = vote_simulator; + let rpc_subscriptions = Some(rpc_subscriptions); + + let working_bank = bank_forks.read().unwrap().working_bank(); + assert!(working_bank.is_complete()); + assert!(working_bank.is_frozen()); + + let poh_slot = working_bank.slot() + 2; + let alpenglow_slot = working_bank.slot() + 3; + let initial_slot = working_bank.slot(); + let mut is_alpenglow_migration_complete = false; + + // Reset PoH recorder to the completed bank to ensure consistent state + ReplayStage::reset_poh_recorder( + &my_pubkey, + &blockstore, + working_bank.clone(), + &mut poh_controller, + &leader_schedule_cache, + ); + wait_for_poh_service(&poh_controller); + + // Register just over one slot worth of ticks directly with PoH recorder + let num_poh_ticks = + (working_bank.ticks_per_slot() * working_bank.hashes_per_tick().unwrap()) + 1; + poh_recorder + .write() + .map(|mut poh_recorder| { + for _ in 0..num_poh_ticks + 1 { + poh_recorder.tick(); + } + }) + .unwrap(); + + let poh_recorder = Arc::new(poh_recorder); + let (retransmit_slots_sender, _) = unbounded(); + let (banking_tracer, _) = BankingTracer::new(None).unwrap(); + let has_new_vote_been_rooted = true; + + // We should start leader for the poh slot, however alpenglow migration should not be started + assert!(ReplayStage::maybe_start_leader( + &my_pubkey, + &bank_forks, + &poh_recorder, + &mut poh_controller, + &leader_schedule_cache, + rpc_subscriptions.as_deref(), + &None, + &mut progress, + &retransmit_slots_sender, + &mut SkippedSlotsInfo::default(), + &banking_tracer, + has_new_vote_been_rooted, + &None, + &mut is_alpenglow_migration_complete, + ) + .is_some()); + assert!(!is_alpenglow_migration_complete); + wait_for_poh_service(&poh_controller); + let working_bank = bank_forks.read().unwrap().working_bank(); + assert_eq!(working_bank.slot(), poh_slot); + assert_eq!(working_bank.parent_slot(), initial_slot); + + // Register another slots worth of ticks with PoH recorder + poh_recorder + .write() + .map(|mut 
poh_recorder| { + for _ in 0..num_poh_ticks + 1 { + poh_recorder.tick(); + } + }) + .unwrap(); + + // We should now *fail* to start leader for the alpenglow slot, + // however the migration must have succeeded + assert!(ReplayStage::maybe_start_leader( + &my_pubkey, + &bank_forks, + &poh_recorder, + &mut poh_controller, + &leader_schedule_cache, + rpc_subscriptions.as_deref(), + &None, + &mut progress, + &retransmit_slots_sender, + &mut SkippedSlotsInfo::default(), + &banking_tracer, + has_new_vote_been_rooted, + &Some(alpenglow_slot), + &mut is_alpenglow_migration_complete, + ) + .is_none()); + assert!(is_alpenglow_migration_complete); + wait_for_poh_service(&poh_controller); + + // Working bank should not be updated past the poh slot + let working_bank = bank_forks.read().unwrap().working_bank(); + assert_eq!(working_bank.slot(), poh_slot); + assert_eq!(working_bank.parent_slot(), initial_slot); + } + + #[test] + fn test_alpenglow_poh_migration_from_replay() { + solana_logger::setup(); + + let ReplayBlockstoreComponents { + blockstore, + my_pubkey, + poh_recorder, + vote_simulator, + .. + } = replay_blockstore_components(Some(tr(0) / tr(1) / tr(2) / tr(3) / tr(4)), 1, None); + let VoteSimulator { + bank_forks, + mut progress, + mut latest_validator_votes_for_frozen_banks, + mut tbft_structs, + .. 
+ } = vote_simulator; + let (cluster_slots_update_sender, _cluster_slots_update_receiver) = unbounded(); + let (cost_update_sender, _cost_update_receiver) = unbounded(); + let (ancestor_hashes_replay_update_sender, _ancestor_hashes_replay_update_receiver) = + unbounded(); + + let poh_slot = bank_forks.read().unwrap().highest_slot() + 1; + let first_alpenglow_slot = bank_forks.read().unwrap().highest_slot() + 2; + let mut is_alpenglow_migration_complete = false; + + // Finishing the poh slot should not trigger migration + let parent_bank = bank_forks.read().unwrap().working_bank(); + let poh_bank = Bank::new_from_parent(parent_bank, &Pubkey::new_unique(), poh_slot); + poh_bank.set_tick_height(poh_bank.max_tick_height()); + progress.insert( + poh_slot, + ForkProgress::new_from_bank( + &poh_bank, + poh_bank.collector_id(), + &Pubkey::new_unique(), + Some(0), + 0, + 0, + ), + ); + bank_forks.write().unwrap().insert(poh_bank); + let replay_result_vec = vec![ReplaySlotFromBlockstore { + is_slot_dead: false, + bank_slot: poh_slot, + replay_result: None, + }]; + + ReplayStage::process_replay_results( + &blockstore, + &bank_forks, + &mut progress, + None, + &None, + None, + &None, + &mut latest_validator_votes_for_frozen_banks, + &cluster_slots_update_sender, + &cost_update_sender, &mut DuplicateSlotsToRepair::default(), &ancestor_hashes_replay_update_sender, + None, + &replay_result_vec, + &mut PurgeRepairSlotCounter::default(), + &my_pubkey, + Some(first_alpenglow_slot), + &poh_recorder, + &mut is_alpenglow_migration_complete, + Some(&mut tbft_structs), + ); + assert!(!is_alpenglow_migration_complete); + + // Finishing the alpenglow slot should now trigger the migration + let parent_bank = bank_forks.read().unwrap().working_bank(); + let ag_bank = + Bank::new_from_parent(parent_bank, &Pubkey::new_unique(), first_alpenglow_slot); + ag_bank.set_tick_height(ag_bank.max_tick_height()); + progress.insert( + first_alpenglow_slot, + ForkProgress::new_from_bank( + &ag_bank, + 
ag_bank.collector_id(), + &Pubkey::new_unique(), + Some(0), + 0, + 0, + ), + ); + bank_forks.write().unwrap().insert(ag_bank); + let replay_result_vec = vec![ReplaySlotFromBlockstore { + is_slot_dead: false, + bank_slot: first_alpenglow_slot, + replay_result: None, + }]; + + ReplayStage::process_replay_results( + &blockstore, + &bank_forks, + &mut progress, + None, + &None, + None, + &None, + &mut latest_validator_votes_for_frozen_banks, + &cluster_slots_update_sender, + &cost_update_sender, + &mut DuplicateSlotsToRepair::default(), + &ancestor_hashes_replay_update_sender, + None, + &replay_result_vec, &mut PurgeRepairSlotCounter::default(), + &my_pubkey, + Some(first_alpenglow_slot), + &poh_recorder, + &mut is_alpenglow_migration_complete, + Some(&mut tbft_structs), ); + assert!(is_alpenglow_migration_complete); } } diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 3f321e0ab19f16..7434c3382ee1a8 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -13,11 +13,10 @@ use { solana_ledger::shred::{self, should_discard_shred, ShredFetchStats}, solana_packet::{Meta, PACKET_DATA_SIZE}, solana_perf::packet::{ - PacketBatch, PacketBatchRecycler, PacketFlags, PacketRef, PinnedPacketBatch, - PACKETS_PER_BATCH, + BytesPacket, BytesPacketBatch, PacketBatch, PacketBatchRecycler, PacketFlags, PacketRef, }, solana_pubkey::Pubkey, - solana_runtime::bank_forks::BankForks, + solana_runtime::bank_forks::{BankForks, SharableBanks}, solana_streamer::{ evicting_sender::EvictingSender, streamer::{self, ChannelSend, PacketBatchReceiver, StreamerReceiveStats}, @@ -65,7 +64,7 @@ impl ShredFetchStage { recvr: PacketBatchReceiver, recvr_stats: Option>, sendr: EvictingSender, - bank_forks: &RwLock, + sharable_banks: &SharableBanks, shred_version: u16, name: &'static str, flags: PacketFlags, @@ -83,18 +82,17 @@ impl ShredFetchStage { let ( mut last_root, mut slots_per_epoch, - mut _feature_set, - mut _epoch_schedule, + mut feature_set, 
+ mut epoch_schedule, mut last_slot, ) = { - let bank_forks_r = bank_forks.read().unwrap(); - let root_bank = bank_forks_r.root_bank(); + let root_bank = sharable_banks.root(); ( root_bank.slot(), root_bank.get_slots_in_epoch(root_bank.epoch()), root_bank.feature_set.clone(), root_bank.epoch_schedule().clone(), - bank_forks_r.highest_slot(), + sharable_banks.working().slot(), ) }; let mut stats = ShredFetchStats::default(); @@ -102,13 +100,10 @@ impl ShredFetchStage { for mut packet_batch in recvr { if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT { last_updated = Instant::now(); - let root_bank = { - let bank_forks_r = bank_forks.read().unwrap(); - last_slot = bank_forks_r.highest_slot(); - bank_forks_r.root_bank() - }; - _feature_set = root_bank.feature_set.clone(); - _epoch_schedule = root_bank.epoch_schedule().clone(); + last_slot = sharable_banks.working().slot(); + let root_bank = sharable_banks.root(); + feature_set = root_bank.feature_set.clone(); + epoch_schedule = root_bank.epoch_schedule().clone(); last_root = root_bank.slot(); slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch()); keypair = repair_context.as_ref().copied().map(RepairContext::keypair); @@ -150,6 +145,14 @@ impl ShredFetchStage { // Filter out shreds that are way too far in the future to avoid the // overhead of having to hold onto them. 
let max_slot = last_slot + MAX_SHRED_DISTANCE_MINIMUM.max(2 * slots_per_epoch); + let enforce_fixed_fec_set = |shred_slot| { + check_feature_activation( + &agave_feature_set::enforce_fixed_fec_set::id(), + shred_slot, + &feature_set, + &epoch_schedule, + ) + }; let turbine_disabled = turbine_disabled.load(Ordering::Relaxed); for mut packet in packet_batch.iter_mut().filter(|p| !p.meta().discard()) { if turbine_disabled @@ -158,6 +161,7 @@ impl ShredFetchStage { last_root, max_slot, shred_version, + enforce_fixed_fec_set, &mut stats, ) { @@ -198,6 +202,7 @@ impl ShredFetchStage { repair_context: Option, turbine_disabled: Arc, ) -> (Vec>, JoinHandle<()>) { + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); let (packet_sender, packet_receiver) = EvictingSender::new_bounded(SHRED_FETCH_CHANNEL_SIZE); let receiver_stats = Arc::new(StreamerReceiveStats::new(receiver_name)); @@ -226,7 +231,7 @@ impl ShredFetchStage { packet_receiver, Some(receiver_stats), sender, - &bank_forks, + &sharable_banks, shred_version, name, flags, @@ -298,7 +303,6 @@ impl ShredFetchStage { { let (packet_sender, packet_receiver) = unbounded(); let bank_forks = bank_forks.clone(); - let recycler = recycler.clone(); let exit = exit.clone(); let sender = sender.clone(); let turbine_disabled = turbine_disabled.clone(); @@ -310,7 +314,6 @@ impl ShredFetchStage { repair_response_quic_receiver, PacketFlags::REPAIR, packet_sender, - recycler, exit, ) }) @@ -318,11 +321,12 @@ impl ShredFetchStage { Builder::new() .name("solTvuFetchRpr".to_string()) .spawn(move || { + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); Self::modify_packets( packet_receiver, None, sender, - &bank_forks, + &sharable_banks, shred_version, "shred_fetch_repair_quic", PacketFlags::REPAIR, @@ -344,7 +348,6 @@ impl ShredFetchStage { turbine_quic_endpoint_receiver, PacketFlags::empty(), packet_sender, - recycler, exit, ) }) @@ -352,11 +355,12 @@ impl ShredFetchStage { Builder::new() 
.name("solTvuFetchQuic".to_string()) .spawn(move || { + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); Self::modify_packets( packet_receiver, None, sender, - &bank_forks, + &sharable_banks, shred_version, "shred_fetch_quic", PacketFlags::empty(), @@ -405,7 +409,6 @@ pub(crate) fn receive_quic_datagrams( quic_datagrams_receiver: Receiver<(Pubkey, SocketAddr, Bytes)>, flags: PacketFlags, sender: Sender, - recycler: PacketBatchRecycler, exit: Arc, ) { const RECV_TIMEOUT: Duration = Duration::from_secs(1); @@ -416,44 +419,31 @@ pub(crate) fn receive_quic_datagrams( Err(RecvTimeoutError::Timeout) => continue, Err(RecvTimeoutError::Disconnected) => return, }; - let mut packet_batch = PinnedPacketBatch::new_with_recycler( - &recycler, - PACKETS_PER_BATCH, - "receive_quic_datagrams", - ); - unsafe { - packet_batch.set_len(PACKETS_PER_BATCH); - }; let deadline = Instant::now() + PACKET_COALESCE_DURATION; let entries = std::iter::once(entry).chain( std::iter::repeat_with(|| quic_datagrams_receiver.recv_deadline(deadline).ok()) .while_some(), ); - let size = entries + let packet_batch: BytesPacketBatch = entries .filter(|(_, _, bytes)| bytes.len() <= PACKET_DATA_SIZE) - .zip(packet_batch.iter_mut()) - .map(|((_pubkey, addr, bytes), packet)| { - *packet.meta_mut() = Meta { + .map(|(_pubkey, addr, bytes)| { + let meta = Meta { size: bytes.len(), addr: addr.ip(), port: addr.port(), flags, }; - packet.buffer_mut()[..bytes.len()].copy_from_slice(&bytes); + BytesPacket::new(bytes, meta) }) - .count(); - if size > 0 { - packet_batch.truncate(size); - if sender.send(packet_batch.into()).is_err() { - return; // The receiver end of the channel is disconnected. - } + .collect(); + if !packet_batch.is_empty() && sender.send(packet_batch.into()).is_err() { + return; // The receiver end of the channel is disconnected. } } } // Returns true if the feature is effective for the shred slot. 
#[must_use] -#[allow(dead_code)] fn check_feature_activation( feature: &Pubkey, shred_slot: Slot, diff --git a/core/src/sigverify.rs b/core/src/sigverify.rs index 412e09b45cb110..de54c72597d820 100644 --- a/core/src/sigverify.rs +++ b/core/src/sigverify.rs @@ -13,7 +13,7 @@ use { sigverify_stage::{SigVerifier, SigVerifyServiceError}, }, agave_banking_stage_ingress_types::BankingPacketBatch, - crossbeam_channel::Sender, + crossbeam_channel::{Sender, TrySendError}, solana_perf::{cuda_runtime::PinnedVec, packet::PacketBatch, recycler::Recycler, sigverify}, }; @@ -61,7 +61,11 @@ impl SigVerifier for TransactionSigVerifier { if let Some(forward_stage_sender) = &self.forward_stage_sender { self.banking_stage_sender .send(banking_packet_batch.clone())?; - let _ = forward_stage_sender.try_send((banking_packet_batch, self.reject_non_vote)); + if let Err(TrySendError::Full(_)) = + forward_stage_sender.try_send((banking_packet_batch, self.reject_non_vote)) + { + warn!("forwarding stage channel is full, dropping packets."); + } } else { self.banking_stage_sender.send(banking_packet_batch)?; } diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs index c0eae3e2ab90ea..cde473edfdfe59 100644 --- a/core/src/snapshot_packager_service.rs +++ b/core/src/snapshot_packager_service.rs @@ -202,8 +202,8 @@ impl SnapshotPackagerService { ); if let Err(err) = result { warn!("Failed to hard link account storages: {err}"); - // If hard linking the storages failed, we do *NOT* want to write - // the "storages flushed" file, so return early. + // If hard linking the storages failed, we do *NOT* want to mark the bank snapshot as + // loadable so return early. 
return; } info!( @@ -211,9 +211,9 @@ impl SnapshotPackagerService { start.elapsed(), ); - let result = snapshot_utils::write_storages_flushed_file(&bank_snapshot_dir); + let result = snapshot_utils::mark_bank_snapshot_as_loadable(&bank_snapshot_dir); if let Err(err) = result { - warn!("Failed to mark snapshot storages 'flushed': {err}"); + warn!("Failed to mark bank snapshot as loadable: {err}"); } } } diff --git a/core/src/system_monitor_service.rs b/core/src/system_monitor_service.rs index 0bfd2e71d0780d..55fc4802303a03 100644 --- a/core/src/system_monitor_service.rs +++ b/core/src/system_monitor_service.rs @@ -429,7 +429,7 @@ impl SystemMonitorService { } fn normalize_err(key: &str, error: E) -> String { - format!("Failed to query value for {}: {}", key, error) + format!("Failed to query value for {key}: {error}") } INTERESTING_LIMITS .iter() @@ -438,7 +438,7 @@ impl SystemMonitorService { .map_err(|e| normalize_err(key, e)) .and_then(|val| val.parse::().map_err(|e| normalize_err(key, e))) .unwrap_or_else(|e| { - error!("{}", e); + error!("{e}"); -1 }); (*key, interesting_limit, current_value) @@ -498,7 +498,7 @@ impl SystemMonitorService { } *net_stats = Some(new_stats); } - Err(e) => warn!("read_net_stats: {}", e), + Err(e) => warn!("read_net_stats: {e}"), } } @@ -833,7 +833,7 @@ impl SystemMonitorService { } *disk_stats = Some(new_stats); } - Err(e) => warn!("read_disk_stats: {}", e), + Err(e) => warn!("read_disk_stats: {e}"), } } diff --git a/core/src/tpu.rs b/core/src/tpu.rs index d647ea04f4aeba..f6958828c79020 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -1,12 +1,6 @@ //! The `tpu` module implements the Transaction Processing Unit, a //! multi-stage transaction processing pipeline in software. 
-// allow multiple connections for NAT and any open/close overlap -#[deprecated( - since = "2.2.0", - note = "Use solana_streamer::quic::DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER instead" -)] -pub use solana_streamer::quic::DEFAULT_MAX_QUIC_CONNECTIONS_PER_PEER as MAX_QUIC_CONNECTIONS_PER_PEER; pub use { crate::forwarding_stage::ForwardingClientOption, solana_streamer::quic::DEFAULT_TPU_COALESCE, }; @@ -55,7 +49,7 @@ use { vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender}, }, solana_streamer::{ - quic::{spawn_server_multi, QuicServerParams, SpawnServerResult}, + quic::{spawn_server_with_cancel, QuicServerParams, SpawnServerResult}, streamer::StakedNodes, }, solana_turbine::{ @@ -65,11 +59,13 @@ use { std::{ collections::HashMap, net::{SocketAddr, UdpSocket}, + num::NonZeroUsize, sync::{atomic::AtomicBool, Arc, RwLock}, thread::{self, JoinHandle}, time::Duration, }, tokio::sync::mpsc::Sender as AsyncSender, + tokio_util::sync::CancellationToken, }; pub struct TpuSockets { @@ -104,7 +100,7 @@ pub struct Tpu { fetch_stage: FetchStage, sig_verifier: SigVerifier, vote_sigverify_stage: SigVerifyStage, - banking_stage: BankingStage, + banking_stage: Arc>>, forwarding_stage: JoinHandle<()>, cluster_info_vote_listener: ClusterInfoVoteListener, broadcast_stage: BroadcastStage, @@ -156,10 +152,12 @@ impl Tpu { vote_quic_server_config: QuicServerParams, prioritization_fee_cache: &Arc, block_production_method: BlockProductionMethod, + block_production_num_workers: NonZeroUsize, transaction_struct: TransactionStructure, enable_block_production_forwarding: bool, _generator_config: Option, /* vestigial code for replay invalidator */ key_notifiers: Arc>, + cancel: CancellationToken, ) -> Self { let TpuSockets { transactions: transactions_sockets, @@ -212,15 +210,15 @@ impl Tpu { endpoints: _, thread: tpu_vote_quic_t, key_updater: vote_streamer_key_updater, - } = spawn_server_multi( + } = spawn_server_with_cancel( "solQuicTVo", "quic_streamer_tpu_vote", tpu_vote_quic_sockets, 
keypair, vote_packet_sender.clone(), - exit.clone(), staked_nodes.clone(), vote_quic_server_config, + cancel.clone(), ) .unwrap(); @@ -230,15 +228,15 @@ impl Tpu { endpoints: _, thread: tpu_quic_t, key_updater, - } = spawn_server_multi( + } = spawn_server_with_cancel( "solQuicTpu", "quic_streamer_tpu", transactions_quic_sockets, keypair, packet_sender, - exit.clone(), staked_nodes.clone(), tpu_quic_server_config, + cancel.clone(), ) .unwrap(); (Some(tpu_quic_t), Some(key_updater)) @@ -252,15 +250,15 @@ impl Tpu { endpoints: _, thread: tpu_forwards_quic_t, key_updater: forwards_key_updater, - } = spawn_server_multi( + } = spawn_server_with_cancel( "solQuicTpuFwd", "quic_streamer_tpu_forwards", transactions_forwards_quic_sockets, keypair, forwarded_packet_sender, - exit.clone(), staked_nodes.clone(), tpu_fwd_quic_server_config, + cancel, ) .unwrap(); (Some(tpu_forwards_quic_t), Some(forwards_key_updater)) @@ -323,19 +321,20 @@ impl Tpu { duplicate_confirmed_slot_sender, ); - let banking_stage = BankingStage::new( + let banking_stage = BankingStage::new_num_threads( block_production_method, transaction_struct, - poh_recorder, + poh_recorder.clone(), transaction_recorder, non_vote_receiver, tpu_vote_receiver, gossip_vote_receiver, + block_production_num_workers, transaction_status_sender, replay_vote_sender, log_messages_bytes_limit, bank_forks.clone(), - prioritization_fee_cache, + prioritization_fee_cache.clone(), ); let SpawnForwardingStageResult { @@ -345,7 +344,7 @@ impl Tpu { forward_stage_receiver, client, vote_forwarding_client_socket, - bank_forks.read().unwrap().sharable_root_bank(), + bank_forks.read().unwrap().sharable_banks(), ForwardAddressGetter::new(cluster_info.clone(), poh_recorder.clone()), DataBudget::default(), ); @@ -392,7 +391,7 @@ impl Tpu { fetch_stage, sig_verifier, vote_sigverify_stage, - banking_stage, + banking_stage: Arc::new(RwLock::new(Some(banking_stage))), forwarding_stage, cluster_info_vote_listener, broadcast_stage, @@ -405,13 
+404,22 @@ impl Tpu { } } + pub fn banking_stage(&self) -> Arc>> { + self.banking_stage.clone() + } + pub fn join(self) -> thread::Result<()> { let results = vec![ self.fetch_stage.join(), self.sig_verifier.join(), self.vote_sigverify_stage.join(), self.cluster_info_vote_listener.join(), - self.banking_stage.join(), + self.banking_stage + .write() + .unwrap() + .take() + .expect("banking_stage must be Some") + .join(), self.forwarding_stage.join(), self.staked_nodes_updater_service.join(), self.tpu_quic_t.map_or(Ok(()), |t| t.join()), diff --git a/core/src/tvu.rs b/core/src/tvu.rs index d3165d2e1e9f58..8858305854b968 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -35,7 +35,7 @@ use { blockstore_processor::TransactionStatusSender, entry_notifier_service::EntryNotifierSender, leader_schedule_cache::LeaderScheduleCache, }, - solana_poh::poh_recorder::PohRecorder, + solana_poh::{poh_controller::PohController, poh_recorder::PohRecorder}, solana_pubkey::Pubkey, solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::BankNotificationSenderConfig, @@ -86,6 +86,7 @@ pub struct TvuSockets { pub repair: UdpSocket, pub retransmit: Vec, pub ancestor_hashes_requests: UdpSocket, + pub alpenglow: Option, } pub struct TvuConfig { @@ -136,6 +137,7 @@ impl Tvu { ledger_signal_receiver: Receiver, rpc_subscriptions: Option>, poh_recorder: &Arc>, + poh_controller: PohController, tower: Tower, tower_storage: Arc, leader_schedule_cache: &Arc, @@ -180,6 +182,7 @@ impl Tvu { fetch: fetch_sockets, retransmit: retransmit_sockets, ancestor_hashes_requests: ancestor_hashes_socket, + alpenglow: alpenglow_socket, } = sockets; let (fetch_sender, fetch_receiver) = EvictingSender::new_bounded(SHRED_FETCH_CHANNEL_SIZE); @@ -335,6 +338,7 @@ impl Tvu { bank_forks: bank_forks.clone(), cluster_info: cluster_info.clone(), poh_recorder: poh_recorder.clone(), + poh_controller, tower, vote_tracker, cluster_slots, @@ -350,6 +354,8 @@ impl Tvu { poh_recorder.clone(), tower_storage, 
vote_connection_cache.clone(), + alpenglow_socket, + bank_forks.clone(), ); let warm_quic_cache_service = create_cache_warmer_if_needed( @@ -511,8 +517,14 @@ pub mod tests { .expect("Expected to successfully open ledger"); let blockstore = Arc::new(blockstore); let bank = bank_forks.read().unwrap().working_bank(); - let (exit, poh_recorder, _transaction_recorder, poh_service, _entry_receiver) = - create_test_recorder(bank.clone(), blockstore.clone(), None, None); + let ( + exit, + poh_recorder, + poh_controller, + _transaction_recorder, + poh_service, + _entry_receiver, + ) = create_test_recorder(bank.clone(), blockstore.clone(), None, None); let vote_keypair = Keypair::new(); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); @@ -524,14 +536,14 @@ pub mod tests { let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let outstanding_repair_requests = Arc::>::default(); - let cluster_slots = Arc::new(ClusterSlots::default()); + let cluster_slots = Arc::new(ClusterSlots::default_for_tests()); let wen_restart_repair_slots = if enable_wen_restart { Some(Arc::new(RwLock::new(vec![]))) } else { None }; let connection_cache = if DEFAULT_VOTE_USE_QUIC { - ConnectionCache::new_quic( + ConnectionCache::new_quic_for_tests( "connection_cache_vote_quic", DEFAULT_TPU_CONNECTION_POOL_SIZE, ) @@ -553,6 +565,7 @@ pub mod tests { retransmit: target1.sockets.retransmit_sockets, fetch: target1.sockets.tvu, ancestor_hashes_requests: target1.sockets.ancestor_hashes_requests, + alpenglow: target1.sockets.alpenglow, } }, blockstore, @@ -565,6 +578,7 @@ pub mod tests { OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), ))), &poh_recorder, + poh_controller, Tower::default(), Arc::new(FileTowerStorage::default()), &leader_schedule_cache, diff --git 
a/core/src/validator.rs b/core/src/validator.rs index 58774f5f5d642a..96a524ce5aa9ba 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -4,6 +4,7 @@ pub use solana_perf::report_target_features; use { crate::{ admin_rpc_post_init::{AdminRpcRequestMetadataPostInit, KeyUpdaterType, KeyUpdaters}, + banking_stage::BankingStage, banking_trace::{self, BankingTracer, TraceError}, cluster_info_vote_listener::VoteTracker, completed_data_sets_service::CompletedDataSetsService, @@ -39,11 +40,15 @@ use { }, utils::move_and_async_delete_path_contents, }, - solana_client::connection_cache::{ConnectionCache, Protocol}, + solana_client::{ + client_option::ClientOption, + connection_cache::{ConnectionCache, Protocol}, + }, solana_clock::Slot, + solana_cluster_type::ClusterType, solana_entry::poh::compute_hash_time, solana_epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, - solana_genesis_config::{ClusterType, GenesisConfig}, + solana_genesis_config::GenesisConfig, solana_geyser_plugin_manager::{ geyser_plugin_service::GeyserPluginService, GeyserPluginManagerRequest, }, @@ -55,7 +60,7 @@ use { contact_info::ContactInfo, crds_gossip_pull::CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS, gossip_service::GossipService, - node::Node, + node::{Node, NodeMultihoming}, }, solana_hard_forks::HardForks, solana_hash::Hash, @@ -78,6 +83,7 @@ use { solana_measure::measure::Measure, solana_metrics::{datapoint_info, metrics::metrics_config_sanity_check}, solana_poh::{ + poh_controller::PohController, poh_recorder::PohRecorder, poh_service::{self, PohService}, transaction_recorder::TransactionRecorder, @@ -93,7 +99,7 @@ use { rpc::JsonRpcConfig, rpc_completed_slots_service::RpcCompletedSlotsService, rpc_pubsub_service::{PubSubConfig, PubSubService}, - rpc_service::{ClientOption, JsonRpcService, JsonRpcServiceConfig}, + rpc_service::{JsonRpcService, JsonRpcServiceConfig}, rpc_subscriptions::RpcSubscriptions, transaction_notifier_interface::TransactionNotifierArc, 
transaction_status_service::TransactionStatusService, @@ -181,8 +187,21 @@ impl BlockVerificationMethod { } } -#[derive(Clone, EnumString, EnumVariantNames, Default, IntoStaticStr, Display)] +#[derive( + Clone, + Debug, + EnumString, + EnumVariantNames, + Default, + IntoStaticStr, + Display, + Serialize, + Deserialize, + PartialEq, + Eq, +)] #[strum(serialize_all = "kebab-case")] +#[serde(rename_all = "kebab-case")] pub enum BlockProductionMethod { CentralScheduler, #[default] @@ -199,8 +218,21 @@ impl BlockProductionMethod { } } -#[derive(Clone, EnumString, EnumVariantNames, Default, IntoStaticStr, Display)] +#[derive( + Clone, + Debug, + EnumString, + EnumVariantNames, + Default, + IntoStaticStr, + Display, + Serialize, + Deserialize, + PartialEq, + Eq, +)] #[strum(serialize_all = "kebab-case")] +#[serde(rename_all = "kebab-case")] pub enum TransactionStructure { Sdk, #[default] @@ -268,7 +300,7 @@ pub struct ValidatorConfig { pub poh_pinned_cpu_core: usize, pub poh_hashes_per_batch: u64, pub process_ledger_before_services: bool, - pub accounts_db_config: Option, + pub accounts_db_config: AccountsDbConfig, pub warp_slot: Option, pub accounts_db_skip_shrink: bool, pub accounts_db_force_initial_clean: bool, @@ -282,6 +314,7 @@ pub struct ValidatorConfig { pub banking_trace_dir_byte_limit: banking_trace::DirByteLimit, pub block_verification_method: BlockVerificationMethod, pub block_production_method: BlockProductionMethod, + pub block_production_num_workers: NonZeroUsize, pub transaction_struct: TransactionStructure, pub enable_block_production_forwarding: bool, pub generator_config: Option, @@ -354,12 +387,13 @@ impl ValidatorConfig { validator_exit: Arc::new(RwLock::new(Exit::default())), validator_exit_backpressure: HashMap::default(), no_wait_for_vote_to_start_leader: true, - accounts_db_config: Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + accounts_db_config: ACCOUNTS_DB_CONFIG_FOR_TESTING, wait_to_vote_slot: None, runtime_config: RuntimeConfig::default(), 
banking_trace_dir_byte_limit: 0, block_verification_method: BlockVerificationMethod::default(), block_production_method: BlockProductionMethod::default(), + block_production_num_workers: BankingStage::default_num_workers(), transaction_struct: TransactionStructure::default(), // enable forwarding by default for tests enable_block_production_forwarding: true, @@ -667,6 +701,10 @@ impl Validator { sigverify::init(); info!("Initializing sigverify done."); + solana_accounts_db::validate_memlock_limit_for_disk_io( + config.accounts_db_config.memlock_budget_size, + )?; + if !ledger_path.is_dir() { return Err(anyhow!( "ledger directory does not exist or is not accessible: {ledger_path:?}" @@ -697,8 +735,8 @@ impl Validator { timer.stop(); info!("Cleaning orphaned account snapshot directories done. {timer}"); - // token used to cancel tpu-client-next. - let cancel_tpu_client_next = CancellationToken::new(); + // token used to cancel tpu-client-next and streamer. + let cancel = CancellationToken::new(); { let exit = exit.clone(); config @@ -706,12 +744,12 @@ impl Validator { .write() .unwrap() .register_exit(Box::new(move || exit.store(true, Ordering::Relaxed))); - let cancel_tpu_client_next = cancel_tpu_client_next.clone(); + let cancel = cancel.clone(); config .validator_exit .write() .unwrap() - .register_exit(Box::new(move || cancel_tpu_client_next.cancel())); + .register_exit(Box::new(move || cancel.cancel())); } let ( @@ -834,7 +872,9 @@ impl Validator { cluster_info.set_contact_debug_interval(config.contact_debug_interval); cluster_info.set_entrypoints(cluster_entrypoints); cluster_info.restore_contact_info(ledger_path, config.contact_save_interval); + cluster_info.set_bind_ip_addrs(node.bind_ip_addrs.clone()); let cluster_info = Arc::new(cluster_info); + let node_multihoming = Arc::new(NodeMultihoming::from(&node)); assert!(is_snapshot_config_valid(&config.snapshot_config)); @@ -900,10 +940,8 @@ impl Validator { let prioritization_fee_cache = 
Arc::new(PrioritizationFeeCache::default()); let leader_schedule_cache = Arc::new(leader_schedule_cache); - let startup_verification_complete; let (mut poh_recorder, entry_receiver) = { let bank = &bank_forks.read().unwrap().working_bank(); - startup_verification_complete = Arc::clone(bank.get_startup_verification_complete()); PohRecorder::new_with_clear_signal( bank.tick_height(), bank.last_blockhash(), @@ -925,6 +963,7 @@ impl Validator { let transaction_recorder = TransactionRecorder::new(record_sender, poh_recorder.is_exited.clone()); let poh_recorder = Arc::new(RwLock::new(poh_recorder)); + let (poh_controller, poh_service_message_receiver) = PohController::new(); let (banking_tracer, tracer_thread) = BankingTracer::new((config.banking_trace_dir_byte_limit > 0).then_some(( @@ -1030,18 +1069,23 @@ impl Validator { let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let mut tpu_transactions_forwards_client = - Some(node.sockets.tpu_transaction_forwarding_client); - + let mut tpu_transactions_forwards_client_sockets = + Some(node.sockets.tpu_transaction_forwarding_clients); let connection_cache = match (config.use_tpu_client_next, use_quic) { (false, true) => Some(Arc::new(ConnectionCache::new_with_client_options( "connection_cache_tpu_quic", tpu_connection_pool_size, - Some( - tpu_transactions_forwards_client + Some({ + // this conversion is not beautiful but rust does not allow popping single + // elements from a boxed slice + let socketbox: Box<[_; 1]> = tpu_transactions_forwards_client_sockets .take() - .expect("Socket should exist."), - ), + .unwrap() + .try_into() + .expect("Multihoming support for connection cache is not available"); + let [sock] = *socketbox; + sock + }), Some(( &identity_keypair, node.info @@ -1134,7 +1178,7 @@ impl Validator { Arc::as_ref(&identity_keypair), node.sockets.rpc_sts_client, runtime_handle.clone(), - cancel_tpu_client_next.clone(), + cancel.clone(), ) } else { let Some(connection_cache) = &connection_cache 
else { @@ -1156,7 +1200,6 @@ impl Validator { validator_exit: config.validator_exit.clone(), exit: exit.clone(), override_health_check: rpc_override_health_check.clone(), - startup_verification_complete, optimistically_confirmed_bank: optimistically_confirmed_bank.clone(), send_transaction_service_config: config.send_transaction_service_config.clone(), max_slots: max_slots.clone(), @@ -1301,7 +1344,7 @@ impl Validator { let serve_repair = config.repair_handler_type.create_serve_repair( blockstore.clone(), cluster_info.clone(), - bank_forks.read().unwrap().sharable_root_bank(), + bank_forks.read().unwrap().sharable_banks(), config.repair_whitelist.clone(), ); let (repair_request_quic_sender, repair_request_quic_receiver) = unbounded(); @@ -1332,6 +1375,7 @@ impl Validator { config.poh_pinned_cpu_core, config.poh_hashes_per_batch, record_receiver, + poh_service_message_receiver, ); assert_eq!( blockstore.get_new_shred_signals_len(), @@ -1454,8 +1498,13 @@ impl Validator { let outstanding_repair_requests = Arc::>::default(); - let cluster_slots = - Arc::new(crate::cluster_slots_service::cluster_slots::ClusterSlots::default()); + let root_bank = bank_forks.read().unwrap().root_bank(); + let cluster_slots = Arc::new({ + crate::cluster_slots_service::cluster_slots::ClusterSlots::new( + &root_bank, + &cluster_info, + ) + }); // If RPC is supported and ConnectionCache is used, pass ConnectionCache for being warmup inside Tvu. 
let connection_cache_for_warmup = @@ -1477,6 +1526,15 @@ impl Validator { (None, None) }; + // disable all2all tests if not allowed for a given cluster type + let alpenglow_socket = if genesis_config.cluster_type == ClusterType::Testnet + || genesis_config.cluster_type == ClusterType::Development + { + node.sockets.alpenglow + } else { + None + }; + let tvu = Tvu::new( vote_account, authorized_voter_keypairs, @@ -1487,11 +1545,13 @@ impl Validator { retransmit: node.sockets.retransmit_sockets, fetch: node.sockets.tvu, ancestor_hashes_requests: node.sockets.ancestor_hashes_requests, + alpenglow: alpenglow_socket, }, blockstore.clone(), ledger_signal_receiver, rpc_subscriptions.clone(), &poh_recorder, + poh_controller, tower, config.tower_storage.clone(), &leader_schedule_cache, @@ -1571,11 +1631,10 @@ impl Validator { .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()); ForwardingClientOption::TpuClientNext(( Arc::as_ref(&identity_keypair), - tpu_transactions_forwards_client - .take() - .expect("Socket should exist."), + tpu_transactions_forwards_client_sockets.take().unwrap(), runtime_handle.clone(), - cancel_tpu_client_next, + cancel.clone(), + node_multihoming.clone(), )) }; let tpu = Tpu::new_with_client( @@ -1626,10 +1685,12 @@ impl Validator { vote_quic_server_config, &prioritization_fee_cache, config.block_production_method.clone(), + config.block_production_num_workers, config.transaction_struct.clone(), config.enable_block_production_forwarding, config.generator_config.clone(), key_notifiers.clone(), + cancel, ); datapoint_info!( @@ -1663,7 +1724,8 @@ impl Validator { repair_socket: Arc::new(node.sockets.repair), outstanding_repair_requests, cluster_slots, - gossip_socket: Some(node.sockets.gossip.clone()), + node: Some(node_multihoming), + banking_stage: tpu.banking_stage(), }); Ok(Self { @@ -1720,7 +1782,7 @@ impl Validator { info!("{:?}", node.info); info!( "local gossip address: {}", - node.sockets.gossip.local_addr().unwrap() + 
node.sockets.gossip[0].local_addr().unwrap() ); info!( "local broadcast address: {}", @@ -2662,7 +2724,7 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo // Staked nodes entries will not expire until an epoch after. So it // is necessary here to filter for recent entries to establish liveness. let peers: HashMap<_, _> = cluster_info - .tvu_peers(|q| q.clone()) + .tvu_peers(ContactInfo::clone) .into_iter() .filter(|node| { let age = now.saturating_sub(node.wallclock()); @@ -2744,11 +2806,7 @@ fn cleanup_accounts_paths(config: &ValidatorConfig) { for account_path in &config.account_paths { move_and_async_delete_path_contents(account_path); } - if let Some(shrink_paths) = config - .accounts_db_config - .as_ref() - .and_then(|config| config.shrink_paths.as_ref()) - { + if let Some(shrink_paths) = &config.accounts_db_config.shrink_paths { for shrink_path in shrink_paths { move_and_async_delete_path_contents(shrink_path); } diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index 74a4b5620933ea..37d58a9eca653d 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -14,7 +14,7 @@ use { repair::cluster_slot_state_verifier::{ DuplicateConfirmedSlots, DuplicateSlotsTracker, EpochSlotsFrozenSlots, }, - replay_stage::{HeaviestForkFailures, ReplayStage}, + replay_stage::{HeaviestForkFailures, ReplayStage, TowerBFTStructures}, unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes, }, crossbeam_channel::unbounded, @@ -44,8 +44,8 @@ pub struct VoteSimulator { pub vote_pubkeys: Vec, pub bank_forks: Arc>, pub progress: ProgressMap, - pub heaviest_subtree_fork_choice: HeaviestSubtreeForkChoice, pub latest_validator_votes_for_frozen_banks: LatestValidatorVotesForFrozenBanks, + pub tbft_structs: TowerBFTStructures, } impl VoteSimulator { @@ -64,8 +64,14 @@ impl VoteSimulator { vote_pubkeys, bank_forks, progress, - heaviest_subtree_fork_choice, latest_validator_votes_for_frozen_banks: 
LatestValidatorVotesForFrozenBanks::default(), + tbft_structs: TowerBFTStructures { + heaviest_subtree_fork_choice, + duplicate_slots_tracker: DuplicateSlotsTracker::default(), + duplicate_confirmed_slots: DuplicateConfirmedSlots::default(), + unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes::default(), + epoch_slots_frozen_slots: EpochSlotsFrozenSlots::default(), + }, } } @@ -151,10 +157,12 @@ impl VoteSimulator { .get_fork_stats_mut(new_bank.slot()) .expect("All frozen banks must exist in the Progress map") .bank_hash = Some(new_bank.hash()); - self.heaviest_subtree_fork_choice.add_new_leaf_slot( - (new_bank.slot(), new_bank.hash()), - Some((new_bank.parent_slot(), new_bank.parent_hash())), - ); + self.tbft_structs + .heaviest_subtree_fork_choice + .add_new_leaf_slot( + (new_bank.slot(), new_bank.hash()), + Some((new_bank.parent_slot(), new_bank.parent_hash())), + ); } walk.forward(); @@ -184,9 +192,9 @@ impl VoteSimulator { tower, &mut self.progress, &VoteTracker::default(), - &ClusterSlots::default(), + &ClusterSlots::default_for_tests(), &self.bank_forks, - &mut self.heaviest_subtree_fork_choice, + &mut self.tbft_structs.heaviest_subtree_fork_choice, &mut self.latest_validator_votes_for_frozen_banks, ); @@ -210,7 +218,7 @@ impl VoteSimulator { &self.progress, tower, &self.latest_validator_votes_for_frozen_banks, - &self.heaviest_subtree_fork_choice, + &self.tbft_structs.heaviest_subtree_fork_choice, ); // Make sure this slot isn't locked out or failing threshold @@ -235,14 +243,10 @@ impl VoteSimulator { &mut self.progress, None, // snapshot_controller None, - &mut self.heaviest_subtree_fork_choice, - &mut DuplicateSlotsTracker::default(), - &mut DuplicateConfirmedSlots::default(), - &mut UnfrozenGossipVerifiedVoteHashes::default(), &mut true, &mut Vec::new(), - &mut EpochSlotsFrozenSlots::default(), &drop_bank_sender, + &mut self.tbft_structs, ) .unwrap() } diff --git a/core/src/voting_service.rs b/core/src/voting_service.rs index 
45a733df9296a4..ab5f74c015b355 100644 --- a/core/src/voting_service.rs +++ b/core/src/voting_service.rs @@ -1,6 +1,7 @@ use { crate::{ consensus::tower_storage::{SavedTowerVersions, TowerStorage}, + mock_alpenglow_consensus::MockAlpenglowConsensus, next_leader::upcoming_leader_tpu_vote_sockets, }, bincode::serialize, @@ -8,13 +9,14 @@ use { solana_client::connection_cache::ConnectionCache, solana_clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET}, solana_connection_cache::client_connection::ClientConnection, - solana_gossip::cluster_info::ClusterInfo, + solana_gossip::{cluster_info::ClusterInfo, epoch_specs::EpochSpecs}, solana_measure::measure::Measure, solana_poh::poh_recorder::PohRecorder, + solana_runtime::bank_forks::BankForks, solana_transaction::Transaction, solana_transaction_error::TransportError, std::{ - net::SocketAddr, + net::{SocketAddr, UdpSocket}, sync::{Arc, RwLock}, thread::{self, Builder, JoinHandle}, }, @@ -65,7 +67,7 @@ fn send_vote_transaction( .tpu(connection_cache.protocol()) }) .ok_or(SendVoteError::InvalidTpuAddress)?; - let buf = serialize(transaction)?; + let buf = Arc::new(serialize(transaction)?); let client = connection_cache.get_connection(&tpu); client.send_data_async(buf).map_err(|err| { @@ -85,18 +87,49 @@ impl VotingService { poh_recorder: Arc>, tower_storage: Arc, connection_cache: Arc, + alpenglow_socket: Option, + bank_forks: Arc>, ) -> Self { let thread_hdl = Builder::new() .name("solVoteService".to_string()) - .spawn(move || { - for vote_op in vote_receiver.iter() { - Self::handle_vote( - &cluster_info, - &poh_recorder, - tower_storage.as_ref(), - vote_op, - connection_cache.clone(), - ); + .spawn({ + let mut mock_alpenglow = alpenglow_socket.map(|s| { + MockAlpenglowConsensus::new( + s, + cluster_info.clone(), + EpochSpecs::from(bank_forks.clone()), + ) + }); + move || { + for vote_op in vote_receiver.iter() { + // Figure out if we are casting a vote for a new slot, and what slot it is for + let vote_slot = match 
vote_op { + VoteOp::PushVote { + tx: _, + ref tower_slots, + .. + } => tower_slots.iter().copied().last(), + _ => None, + }; + // perform all the normal vote handling routines + Self::handle_vote( + &cluster_info, + &poh_recorder, + tower_storage.as_ref(), + vote_op, + connection_cache.clone(), + ); + // trigger mock alpenglow vote if we have just cast an actual vote + if let Some(slot) = vote_slot { + if let Some(ag) = mock_alpenglow.as_mut() { + let root_bank = { bank_forks.read().unwrap().root_bank() }; + ag.signal_new_slot(slot, &root_bank); + } + } + } + if let Some(ag) = mock_alpenglow { + let _ = ag.join(); + } } }) .unwrap(); diff --git a/core/src/window_service.rs b/core/src/window_service.rs index 27c3e02fa89657..973de3cabb46a4 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -11,7 +11,6 @@ use { result::{Error, Result}, }, agave_feature_set as feature_set, - assert_matches::debug_assert_matches, crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender}, rayon::{prelude::*, ThreadPool}, solana_clock::{Slot, DEFAULT_MS_PER_SLOT}, @@ -208,9 +207,6 @@ where } if repair { ws_metrics.num_repairs.fetch_add(1, Ordering::Relaxed); - debug_assert_matches!(shred, shred::Payload::Unique(_)); - } else { - debug_assert_matches!(shred, shred::Payload::Shared(_)); } let shred = Shred::new_from_serialized_shred(shred).ok()?; Some((Cow::Owned(shred), repair)) @@ -490,7 +486,7 @@ mod test { entries, true, // is_last_in_slot // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), + Hash::new_from_array(rand::thread_rng().gen()), 0, // next_shred_index 0, // next_code_index &ReedSolomonCache::default(), diff --git a/core/tests/fork-selection.rs b/core/tests/fork-selection.rs index d54fbfba8094b4..9c3a525d5f1f1a 100644 --- a/core/tests/fork-selection.rs +++ b/core/tests/fork-selection.rs @@ -1,6 +1,6 @@ //! Fork Selection Simulation //! -//! 
Description of the algorithm can be found in [Managing Forks](https://docs.solanalabs.com/consensus/managing-forks). +//! Description of the algorithm can be found in [Managing Forks](https://docs.anza.xyz/consensus/managing-forks). //! //! A test library function exists for configuring networks. //! ``` diff --git a/core/tests/scheduler_cost_adjustment.rs b/core/tests/scheduler_cost_adjustment.rs index 04ff1f49ad7a95..3fcaad127bb21b 100644 --- a/core/tests/scheduler_cost_adjustment.rs +++ b/core/tests/scheduler_cost_adjustment.rs @@ -1,6 +1,6 @@ #![cfg(test)] use { - solana_account::Account, + solana_account::{Account, ReadableAccount}, solana_clock::{Slot, MAX_PROCESSING_AGE}, solana_compute_budget::compute_budget_limits::MAX_BUILTIN_ALLOCATION_COMPUTE_UNIT_LIMIT, solana_compute_budget_interface::ComputeBudgetInstruction, @@ -10,23 +10,21 @@ use { solana_keypair::Keypair, solana_loader_v3_interface::state::UpgradeableLoaderState, solana_message::Message, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_pubkey::Pubkey, solana_rent::Rent, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_runtime_transaction::runtime_transaction::RuntimeTransaction, - solana_sdk_ids::{bpf_loader, bpf_loader_upgradeable, secp256k1_program}, + solana_sdk_ids::{bpf_loader_upgradeable, secp256k1_program}, solana_signer::Signer, solana_svm::transaction_processor::ExecutionRecordingConfig, + solana_svm_timings::ExecuteTimings, solana_system_interface::instruction as system_instruction, - solana_timings::ExecuteTimings, solana_transaction::Transaction, solana_transaction_error::{TransactionError, TransactionResult as Result}, std::sync::{Arc, RwLock}, }; -const MEMO_PROGRAM_ELF: &[u8] = include_bytes!("../../program-test/src/programs/spl_memo-3.0.0.so"); - fn new_bank_from_parent_with_bank_forks( bank_forks: &RwLock, parent: Arc, @@ -58,7 +56,7 @@ struct TestSetup { impl TestSetup { fn new() -> Self { - let (mut genesis_config, mint_keypair) 
= create_genesis_config(sol_to_lamports(1.)); + let (mut genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); genesis_config.rent = Rent::default(); Self { genesis_config, @@ -67,17 +65,14 @@ impl TestSetup { } fn install_memo_program_account(&mut self) { - self.genesis_config.accounts.insert( - spl_memo_interface::v3::id(), - Account { - lamports: u64::MAX, - // borrows memo elf for executing memo ix in order to set up test condition - data: MEMO_PROGRAM_ELF.to_vec(), - owner: bpf_loader::id(), - executable: true, - rent_epoch: 0, - }, - ); + let (pubkey, account) = solana_program_binaries::by_id( + &spl_memo_interface::v3::id(), + &self.genesis_config.rent, + ) + .unwrap() + .swap_remove(0); + + self.genesis_config.add_account(pubkey, account); } fn execute_test_transaction(&mut self, ixs: &[Instruction]) -> TestResult { @@ -167,10 +162,17 @@ impl TestSetup { let payer_address = self.mint_keypair.pubkey(); let upgrade_authority_address = payer_address; + let (_, memo) = solana_program_binaries::by_id( + &spl_memo_interface::v3::id(), + &self.genesis_config.rent, + ) + .unwrap() + .swap_remove(0); + // Stash a valid buffer account before attempting a deployment. { let metadata_offset = UpgradeableLoaderState::size_of_buffer_metadata(); - let space = UpgradeableLoaderState::size_of_buffer(MEMO_PROGRAM_ELF.len()); + let space = UpgradeableLoaderState::size_of_buffer(memo.data().len()); let lamports = self.genesis_config.rent.minimum_balance(space); let mut data = vec![0; space]; @@ -181,7 +183,7 @@ impl TestSetup { }, ) .unwrap(); - data[metadata_offset..].copy_from_slice(MEMO_PROGRAM_ELF); + data[metadata_offset..].copy_from_slice(memo.data()); self.genesis_config.accounts.insert( buffer_address, @@ -217,7 +219,7 @@ impl TestSetup { &buffer_address, &upgrade_authority_address, /* program_lamports */ 0, // Doesn't matter here. 
- /* max_data_len */ MEMO_PROGRAM_ELF.len().saturating_mul(2), + /* max_data_len */ memo.data().len().saturating_mul(2), ) .unwrap() .pop() diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 154ce1c9e327fe..ccbbdd4620506f 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -83,7 +83,6 @@ impl SnapshotTestConfig { vec![accounts_dir.clone()], ); bank0.freeze(); - bank0.set_initial_accounts_hash_verification_completed(); let bank_forks_arc = BankForks::new_rw_arc(bank0); let snapshot_config = SnapshotConfig { @@ -128,7 +127,7 @@ fn restore_from_snapshot( let full_snapshot_archive_info = FullSnapshotArchiveInfo::new_from_path(full_snapshot_archive_path).unwrap(); - let (deserialized_bank, _timing) = snapshot_bank_utils::bank_from_snapshot_archives( + let deserialized_bank = snapshot_bank_utils::bank_from_snapshot_archives( account_paths, &snapshot_config.bank_snapshots_dir, &full_snapshot_archive_info, @@ -137,16 +136,14 @@ fn restore_from_snapshot( &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - deserialized_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); let bank = old_bank_forks.get(deserialized_bank.slot()).unwrap(); assert_eq!(bank.as_ref(), &deserialized_bank); @@ -542,15 +539,13 @@ fn restore_from_snapshots_and_check_banks_are_equal( &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), )?; - deserialized_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(bank, &deserialized_bank); @@ -737,16 +732,14 @@ fn test_snapshots_with_background_services() { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, exit.clone(), ) .unwrap(); - 
deserialized_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!( deserialized_bank.slot(), diff --git a/core/tests/unified_scheduler.rs b/core/tests/unified_scheduler.rs index e9cb465d60cb6b..342f722017b3c8 100644 --- a/core/tests/unified_scheduler.rs +++ b/core/tests/unified_scheduler.rs @@ -15,7 +15,7 @@ use { repair::cluster_slot_state_verifier::{ DuplicateConfirmedSlots, DuplicateSlotsTracker, EpochSlotsFrozenSlots, }, - replay_stage::ReplayStage, + replay_stage::{ReplayStage, TowerBFTStructures}, unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes, }, solana_entry::entry::Entry, @@ -33,8 +33,8 @@ use { prioritization_fee_cache::PrioritizationFeeCache, }, solana_runtime_transaction::runtime_transaction::RuntimeTransaction, + solana_svm_timings::ExecuteTimings, solana_system_transaction as system_transaction, - solana_timings::ExecuteTimings, solana_transaction_error::TransactionResult as Result, solana_unified_scheduler_logic::{SchedulingMode, Task}, solana_unified_scheduler_pool::{ @@ -135,44 +135,47 @@ fn test_scheduler_waited_by_drop_bank_service() { info!("calling handle_new_root()..."); // Mostly copied from: test_handle_new_root() { - let mut heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash)); + let heaviest_subtree_fork_choice = HeaviestSubtreeForkChoice::new((root, root_hash)); let mut progress = ProgressMap::default(); for i in genesis..=root { progress.insert(i, ForkProgress::new(Hash::default(), None, None, 0, 0)); } - let mut duplicate_slots_tracker: DuplicateSlotsTracker = + let duplicate_slots_tracker: DuplicateSlotsTracker = vec![root - 1, root, root + 1].into_iter().collect(); - let mut duplicate_confirmed_slots: DuplicateConfirmedSlots = vec![root - 1, root, root + 1] + let duplicate_confirmed_slots: DuplicateConfirmedSlots = vec![root - 1, root, root + 1] .into_iter() .map(|s| (s, Hash::default())) .collect(); - let mut unfrozen_gossip_verified_vote_hashes: 
UnfrozenGossipVerifiedVoteHashes = + let unfrozen_gossip_verified_vote_hashes: UnfrozenGossipVerifiedVoteHashes = UnfrozenGossipVerifiedVoteHashes { votes_per_slot: vec![root - 1, root, root + 1] .into_iter() .map(|s| (s, HashMap::new())) .collect(), }; - let mut epoch_slots_frozen_slots: EpochSlotsFrozenSlots = vec![root - 1, root, root + 1] + let epoch_slots_frozen_slots: EpochSlotsFrozenSlots = vec![root - 1, root, root + 1] .into_iter() .map(|slot| (slot, Hash::default())) .collect(); + let mut tbft_structs = TowerBFTStructures { + heaviest_subtree_fork_choice, + duplicate_slots_tracker, + duplicate_confirmed_slots, + unfrozen_gossip_verified_vote_hashes, + epoch_slots_frozen_slots, + }; ReplayStage::handle_new_root( root, &bank_forks, &mut progress, None, // snapshot_controller None, - &mut heaviest_subtree_fork_choice, - &mut duplicate_slots_tracker, - &mut duplicate_confirmed_slots, - &mut unfrozen_gossip_verified_vote_hashes, &mut true, &mut Vec::new(), - &mut epoch_slots_frozen_slots, &drop_bank_sender1, + &mut tbft_structs, ) .unwrap(); } @@ -221,13 +224,19 @@ fn test_scheduler_producing_blocks() { let genesis_bank = bank_forks.read().unwrap().working_bank_with_scheduler(); genesis_bank.set_fork_graph_in_program_cache(Arc::downgrade(&bank_forks)); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&genesis_bank)); - let (exit, poh_recorder, transaction_recorder, poh_service, signal_receiver) = - create_test_recorder( - genesis_bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + poh_recorder, + mut poh_controller, + transaction_recorder, + poh_service, + signal_receiver, + ) = create_test_recorder( + genesis_bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); let pool = DefaultSchedulerPool::new(None, None, None, None, ignored_prioritization_fee_cache); let channels = { let banking_tracer = BankingTracer::new_disabled(); @@ -239,7 +248,7 @@ fn 
test_scheduler_producing_blocks() { &channels, &poh_recorder, transaction_recorder, - BankingStage::num_threads(), + BankingStage::default_num_workers(), ); bank_forks.write().unwrap().install_scheduler_pool(pool); @@ -264,10 +273,9 @@ fn test_scheduler_producing_blocks() { .write() .unwrap() .insert_with_scheduling_mode(SchedulingMode::BlockProduction, tpu_bank); - poh_recorder - .write() - .unwrap() - .set_bank(tpu_bank.clone_with_scheduler()); + poh_controller + .set_bank_sync(tpu_bank.clone_with_scheduler()) + .unwrap(); tpu_bank.unpause_new_block_production_scheduler(); let tpu_bank = bank_forks.read().unwrap().working_bank_with_scheduler(); assert_eq!(tpu_bank.transaction_count(), 0); diff --git a/cost-model/src/cost_tracker.rs b/cost-model/src/cost_tracker.rs index 881e997467d0f2..29464adc21314f 100644 --- a/cost-model/src/cost_tracker.rs +++ b/cost-model/src/cost_tracker.rs @@ -4,7 +4,10 @@ //! - add_transaction_cost(&tx_cost), mutable function to accumulate tx_cost to tracker. //! 
use { - crate::{block_cost_limits::*, transaction_cost::TransactionCost}, + crate::{ + block_cost_limits::*, cost_tracker_post_analysis::CostTrackerPostAnalysis, + transaction_cost::TransactionCost, + }, solana_metrics::datapoint_info, solana_pubkey::Pubkey, solana_runtime_transaction::transaction_with_meta::TransactionWithMeta, @@ -239,6 +242,7 @@ impl CostTracker { } let (costliest_account, costliest_account_cost) = self.find_costliest_account(); + let number_of_contended_accounts = self.find_number_of_contended_accounts(); datapoint_info!( "cost_tracker_stats", @@ -282,6 +286,7 @@ impl CostTracker { ), ("total_transaction_fee", total_transaction_fee, i64), ("total_priority_fee", total_priority_fee, i64), + ("number_of_contended_accounts", number_of_contended_accounts, i64), ); } @@ -293,6 +298,19 @@ impl CostTracker { .unwrap_or_default() } + fn find_number_of_contended_accounts(&self) -> usize { + // accounts has more than 95% of account_cu_limit is considered as highly contended + let contended_cost_mark: u64 = self + .account_cost_limit + .saturating_mul(95) + .saturating_div(100); + + self.cost_by_writable_accounts + .values() + .filter(|&&cost| cost >= contended_cost_mark) + .count() + } + fn would_fit( &self, tx_cost: &TransactionCost, @@ -418,6 +436,15 @@ impl CostTracker { } } +/// Implement the trait for the cost tracker +/// This is only used for post-analysis to avoid lock contention +/// Do not use in the hot path +impl CostTrackerPostAnalysis for CostTracker { + fn get_cost_by_writable_accounts(&self) -> &HashMap { + &self.cost_by_writable_accounts + } +} + #[cfg(test)] mod tests { use { @@ -983,4 +1010,20 @@ mod tests { assert_eq!(0, cost_tracker.vote_cost); assert_eq!(0, cost_tracker.allocated_accounts_data_size.0); } + + #[test] + fn test_get_cost_by_writable_accounts_post_analysis() { + let mut cost_tracker = CostTracker::default(); + let cost = 100u64; + let transaction = WritableKeysTransaction(vec![Pubkey::new_unique()]); + let tx_cost = 
simple_transaction_cost(&transaction, cost); + cost_tracker.add_transaction_cost(&tx_cost); + let cost_by_writable_accounts = cost_tracker.get_cost_by_writable_accounts(); + assert_eq!(1, cost_by_writable_accounts.len()); + assert_eq!(cost, *cost_by_writable_accounts.values().next().unwrap()); + assert_eq!( + *cost_by_writable_accounts, + cost_tracker.cost_by_writable_accounts + ); + } } diff --git a/cost-model/src/cost_tracker_post_analysis.rs b/cost-model/src/cost_tracker_post_analysis.rs new file mode 100644 index 00000000000000..904bad8ebd8734 --- /dev/null +++ b/cost-model/src/cost_tracker_post_analysis.rs @@ -0,0 +1,8 @@ +use {solana_pubkey::Pubkey, std::collections::HashMap}; + +/// Trait to help with post-analysis of a given block +pub trait CostTrackerPostAnalysis { + /// Only use in post-analyze to avoid lock contention + /// Do not use in the hot path + fn get_cost_by_writable_accounts(&self) -> &HashMap; +} diff --git a/cost-model/src/lib.rs b/cost-model/src/lib.rs index f408a18de3b377..872d21106801d3 100644 --- a/cost-model/src/lib.rs +++ b/cost-model/src/lib.rs @@ -4,6 +4,7 @@ pub mod block_cost_limits; pub mod cost_model; pub mod cost_tracker; +pub mod cost_tracker_post_analysis; pub mod transaction_cost; #[cfg_attr(feature = "frozen-abi", macro_use)] diff --git a/docs/README.md b/docs/README.md index c6356e7a2ea090..f5c543cebddb68 100644 --- a/docs/README.md +++ b/docs/README.md @@ -39,7 +39,7 @@ The build script generates static content into the `build` directory and can be ./build.sh ``` -Running this build script requires **Docker**, and will auto fetch the [solanalabs/rust](https://hub.docker.com/r/solanalabs/rust) image from Docker hub to compile the desired version of the [Solana CLI](https://docs.solanalabs.com/cli) from source. 
+Running this build script requires **Docker**, and will auto fetch the [solanalabs/rust](https://hub.docker.com/r/solanalabs/rust) image from Docker hub to compile the desired version of the [Solana CLI](https://docs.anza.xyz/cli) from source. This build script will also: @@ -62,13 +62,13 @@ npm run start ## CI Build Flow -The docs are built and published in Github Actions with the `docs.yml` workflow. On each PR, the docs are built, but not published. +The docs are built and published in GitHub Actions with the `docs.yml` workflow. On each PR, the docs are built but not published. In each post-commit build, docs are built and published using `vercel` to their respective domain depending on the build branch. -- Master branch docs are published to `edge.docs.solanalabs.com` -- Beta branch docs are published to `beta.docs.solanalabs.com` -- Latest release tag docs are published to `docs.solanalabs.com` +- Master branch docs are published to `edge.docs.anza.xyz` +- Beta branch docs are published to `beta.docs.anza.xyz` +- Latest release tag docs are published to `docs.anza.xyz` ## Common Issues diff --git a/docs/package-lock.json b/docs/package-lock.json index fe57de9bf3c4b3..1bbb4d1e4efc7f 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -3165,6 +3165,18 @@ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.24.51.tgz", "integrity": "sha512-1P1OROm/rdubP5aFDSZQILU0vrLCJ4fvHt6EoqHEM+2D/G5MK3bIaymUKLit8Js9gbns5UyJnkP/TZROLw4tUA==" }, + "node_modules/@sindresorhus/is": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz", + "integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/is?sponsor=1" + } + }, "node_modules/@slorber/static-site-generator-webpack-plugin": { "version": "4.0.7", "resolved": 
"https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz", @@ -3429,6 +3441,18 @@ "url": "https://github.com/sponsors/gregberge" } }, + "node_modules/@szmarczak/http-timer": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-4.0.6.tgz", + "integrity": "sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w==", + "license": "MIT", + "dependencies": { + "defer-to-connect": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/@trysound/sax": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", @@ -3454,6 +3478,18 @@ "@types/node": "*" } }, + "node_modules/@types/cacheable-request": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/@types/cacheable-request/-/cacheable-request-6.0.3.tgz", + "integrity": "sha512-IQ3EbTzGxIigb1I3qPZc1rWJnH0BmSKv5QYTalEwweFvyBDLSAe24zP0le/hyi7ecGfZVlIVAg4BZqb8WBwKqw==", + "license": "MIT", + "dependencies": { + "@types/http-cache-semantics": "*", + "@types/keyv": "^3.1.4", + "@types/node": "*", + "@types/responselike": "^1.0.0" + } + }, "node_modules/@types/connect": { "version": "3.4.35", "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz", @@ -3536,6 +3572,12 @@ "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg==" }, + "node_modules/@types/http-cache-semantics": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz", + "integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==", + "license": "MIT" + }, "node_modules/@types/http-proxy": { "version": "1.17.9", "resolved": 
"https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.9.tgz", @@ -3575,6 +3617,15 @@ "resolved": "https://registry.npmjs.org/@types/katex/-/katex-0.11.1.tgz", "integrity": "sha512-DUlIj2nk0YnJdlWgsFuVKcX27MLW0KbKmGVoUHmFr+74FYYNUDAaj9ZqTADvsbE8rfxuVmSFc7KczYn5Y09ozg==" }, + "node_modules/@types/keyv": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/@types/keyv/-/keyv-3.1.4.tgz", + "integrity": "sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/mdast": { "version": "3.0.10", "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.10.tgz", @@ -3660,6 +3711,15 @@ "@types/react-router": "*" } }, + "node_modules/@types/responselike": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@types/responselike/-/responselike-1.0.3.tgz", + "integrity": "sha512-H/+L+UkTV33uf49PH5pCAUBVPNj2nDBXTN+qS1dOwyyg24l3CcicicCA7ca+HMvJBZcFgl5r8e+RR6elsb4Lyw==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/retry": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", @@ -4587,9 +4647,10 @@ } }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -4652,6 +4713,48 @@ "node": ">= 0.8" } }, + "node_modules/cacheable-lookup": { + "version": "5.0.4", + "resolved": 
"https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz", + "integrity": "sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==", + "license": "MIT", + "engines": { + "node": ">=10.6.0" + } + }, + "node_modules/cacheable-request": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-7.0.4.tgz", + "integrity": "sha512-v+p6ongsrp0yTGbJXjgxPow2+DL93DASP4kXCDKb8/bwRtt9OEF3whggkkDkGNzgcWy2XaF4a8nZglC7uElscg==", + "license": "MIT", + "dependencies": { + "clone-response": "^1.0.2", + "get-stream": "^5.1.0", + "http-cache-semantics": "^4.0.0", + "keyv": "^4.0.0", + "lowercase-keys": "^2.0.0", + "normalize-url": "^6.0.1", + "responselike": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cacheable-request/node_modules/get-stream": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", + "license": "MIT", + "dependencies": { + "pump": "^3.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/call-bind": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", @@ -5762,6 +5865,33 @@ } } }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "license": "MIT", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/deep-extend": { "version": "0.6.0", "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", @@ -5794,6 +5924,15 @@ "node": ">= 10" } }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, "node_modules/define-data-property": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", @@ -6089,12 +6228,6 @@ "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" }, - "node_modules/duplexer3": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.5.tgz", - "integrity": "sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA==", - "license": "BSD-3-Clause" - }, "node_modules/eastasianwidth": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", @@ -7610,6 +7743,31 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/got": { + "version": "11.8.6", + "resolved": "https://registry.npmjs.org/got/-/got-11.8.6.tgz", + "integrity": "sha512-6tfZ91bOr7bOXnK7PRDCGBLa1H4U080YHNaAQ2KsMGlLEzRbk44nsZF2E1IeRc3vtJHPVbKCYgdFbaGO2ljd8g==", + "license": "MIT", + "dependencies": { + "@sindresorhus/is": "^4.0.0", + "@szmarczak/http-timer": "^4.0.5", + 
"@types/cacheable-request": "^6.0.1", + "@types/responselike": "^1.0.0", + "cacheable-lookup": "^5.0.3", + "cacheable-request": "^7.0.2", + "decompress-response": "^6.0.0", + "http2-wrapper": "^1.0.0-beta.5.2", + "lowercase-keys": "^2.0.0", + "p-cancelable": "^2.0.0", + "responselike": "^2.0.0" + }, + "engines": { + "node": ">=10.19.0" + }, + "funding": { + "url": "https://github.com/sindresorhus/got?sponsor=1" + } + }, "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", @@ -8045,9 +8203,9 @@ } }, "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", + "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==", "license": "BSD-2-Clause" }, "node_modules/http-deceiver": { @@ -8123,6 +8281,19 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/http2-wrapper": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-1.0.3.tgz", + "integrity": "sha512-V+23sDMr12Wnz7iTcDeJr3O6AIxlnvT/bmaAAAP/Xda35C90p9599p0F1eHR/N1KILWSoWVAiOMFjBBXaXSMxg==", + "license": "MIT", + "dependencies": { + "quick-lru": "^5.1.1", + "resolve-alpn": "^1.0.0" + }, + "engines": { + "node": ">=10.19.0" + } + }, "node_modules/human-signals": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", @@ -9025,6 +9196,12 @@ "node": ">=4" } }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": 
"sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "license": "MIT" + }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", @@ -9075,29 +9252,13 @@ "node": ">=4.0" } }, - "node_modules/katex": { - "version": "0.16.22", - "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.22.tgz", - "integrity": "sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==", - "funding": [ - "https://opencollective.com/katex", - "https://github.com/sponsors/katex" - ], + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", "license": "MIT", "dependencies": { - "commander": "^8.3.0" - }, - "bin": { - "katex": "cli.js" - } - }, - "node_modules/katex/node_modules/commander": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", - "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", - "license": "MIT", - "engines": { - "node": ">= 12" + "json-buffer": "3.0.1" } }, "node_modules/kind-of": { @@ -9945,6 +10106,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/p-cancelable": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.1.1.tgz", + "integrity": "sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/p-limit": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", @@ -10018,172 +10188,6 @@ "node": ">=8" } }, - "node_modules/package-json/node_modules/@sindresorhus/is": { - 
"version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json/node_modules/@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", - "license": "MIT", - "dependencies": { - "defer-to-connect": "^1.0.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json/node_modules/cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", - "license": "MIT", - "dependencies": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - "normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/package-json/node_modules/cacheable-request/node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "license": "MIT", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/package-json/node_modules/decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": 
"sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==", - "license": "MIT", - "dependencies": { - "mimic-response": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/package-json/node_modules/defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==", - "license": "MIT" - }, - "node_modules/package-json/node_modules/get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "license": "MIT", - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json/node_modules/got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "license": "MIT", - "dependencies": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/package-json/node_modules/got/node_modules/lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/package-json/node_modules/json-buffer": { 
- "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ==", - "license": "MIT" - }, - "node_modules/package-json/node_modules/keyv": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", - "license": "MIT", - "dependencies": { - "json-buffer": "3.0.0" - } - }, - "node_modules/package-json/node_modules/normalize-url": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz", - "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/package-json/node_modules/p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/package-json/node_modules/responselike": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==", - "license": "MIT", - "dependencies": { - "lowercase-keys": "^1.0.0" - } - }, - "node_modules/package-json/node_modules/responselike/node_modules/lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, 
"node_modules/package-json/node_modules/semver": { "version": "6.3.1", "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", @@ -11057,15 +11061,6 @@ "node": ">= 0.8.0" } }, - "node_modules/prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA==", - "license": "MIT", - "engines": { - "node": ">=4" - } - }, "node_modules/prettier": { "version": "2.8.8", "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", @@ -11267,6 +11262,18 @@ } ] }, + "node_modules/quick-lru": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/randombytes": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", @@ -11843,6 +11850,24 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/rehype-katex/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "license": "MIT" + }, + "node_modules/rehype-katex/node_modules/katex": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/katex/-/katex-0.12.0.tgz", + "integrity": "sha512-y+8btoc/CK70XqcHqjxiGWBOeIL8upbS0peTPXTvgrh21n1RiWWcIpSWM+4uXq+IAgNh9YYQWdc7LVDPDAEEAg==", + "license": "MIT", + "dependencies": { + "commander": "^2.19.0" + }, + "bin": { + "katex": "cli.js" + } + }, "node_modules/rehype-parse": { "version": "7.0.1", "resolved": 
"https://registry.npmjs.org/rehype-parse/-/rehype-parse-7.0.1.tgz", @@ -12165,6 +12190,12 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/resolve-alpn": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz", + "integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==", + "license": "MIT" + }, "node_modules/resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -12178,6 +12209,18 @@ "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" }, + "node_modules/responselike": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-2.0.1.tgz", + "integrity": "sha512-4gl03wn3hj1HP3yzgdI7d3lCkF95F21Pz4BPGvKHinyQzALR5CapwC8yIi0Rh58DEMQ/SguC03wFj2k0M/mHhw==", + "license": "MIT", + "dependencies": { + "lowercase-keys": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/retry": { "version": "0.13.1", "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", @@ -13592,15 +13635,6 @@ "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" }, - "node_modules/to-readable-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -14167,18 +14201,6 @@ 
"url": "https://opencollective.com/webpack" } }, - "node_modules/url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": "sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==", - "license": "MIT", - "dependencies": { - "prepend-http": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/use-composed-ref": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.4.0.tgz", diff --git a/docs/publish-docs.sh b/docs/publish-docs.sh index c14e9bc9dc3dea..d88b770acefc23 100755 --- a/docs/publish-docs.sh +++ b/docs/publish-docs.sh @@ -73,7 +73,6 @@ cat > "$CONFIG_FILE" < "$CONFIG_FILE" < "$CONFIG_FILE" < "$CONFIG_FILE" <` -To remove partition `net.sh netem --config-file --netem-cmd cleanup` -The partitioning is also removed if you do `net.sh stop` or `restart`. - -An example config that produces 3 almost equal partitions: - -``` -{ - "partitions":[ - 34, - 33, - 33 - ], - "interconnects":[ - { - "a":0, - "b":1, - "config":"loss 15% delay 25ms" - }, - { - "a":1, - "b":0, - "config":"loss 15% delay 25ms" - }, - { - "a":0, - "b":2, - "config":"loss 10% delay 15ms" - }, - { - "a":2, - "b":0, - "config":"loss 10% delay 15ms" - }, - { - "a":2, - "b":1, - "config":"loss 5% delay 5ms" - }, - { - "a":1, - "b":2, - "config":"loss 5% delay 5ms" - } - ] -} -``` diff --git a/docs/src/implemented-proposals/installer.md b/docs/src/implemented-proposals/installer.md index e93932366ffdef..f61c5fab844fc2 100644 --- a/docs/src/implemented-proposals/installer.md +++ b/docs/src/implemented-proposals/installer.md @@ -113,9 +113,15 @@ A release archive is expected to be a tar file compressed with bzip2 with the fo The `agave-install` tool is used by the user to install and update their cluster software. 
+:::info +As of v3.0 `agave-install` does not install the `agave-validator` binary, which is required to run a validator node. +Validator operators are required to [build from source](../cli/install.md#build-from-source). + +::: + It manages the following files and directories in the user's home directory: -- `~/.config/solana/install/config.yml` - user configuration and information about currently installed software version +- `~/.config/solana/install/config.yml` - user configuration and information about the currently installed software version - `~/.local/share/solana/install/bin` - a symlink to the current release. eg, `~/.local/share/solana-update/-/bin` - `~/.local/share/solana/install/releases//` - contents of a release diff --git a/docs/src/operations/best-practices/general.md b/docs/src/operations/best-practices/general.md index 3ff0e78b825fc0..96c3e47a94e14a 100644 --- a/docs/src/operations/best-practices/general.md +++ b/docs/src/operations/best-practices/general.md @@ -36,7 +36,7 @@ This is a monthly call that is hosted by the Solana Foundation. ### Community Led Validator Call -This is also a monthly call which is hosted by the Solana validator community itself. +This is also a monthly call hosted by the Solana validator community itself. - Schedule: every fourth Thursday of the month 18:00 CET - Agenda: See [HackMD site](https://hackmd.io/1DFauFMWTZG37-U7CXhxMg?view#Solana-Community-Validator-Call-Agendas). - This call is **not recorded** @@ -89,45 +89,18 @@ will need to upgrade often, so it is important to get comfortable with this process. > **Note** validator nodes do not need to be offline while the newest version is -> being downloaded or built from source. All methods below can be done before +> being built from source. All methods below can be done before > the validator process is restarted. -### Building From Source +### Building the newest version from source -It is a best practice to always build your Agave binaries from source. 
If you -build from source, you are certain that the code you are building has not been -tampered with before the binary was created. You may also be able to optimize -your `agave-validator` binary to your specific hardware. - -If you build from source on the validator machine (or a machine with the same -CPU), you can target your specific architecture using the `-march` flag. Refer -to the following doc for -[instructions on building from source](../../cli/install.md#build-from-source). - -### agave-install - -If you are not comfortable building from source, or you need to quickly install -a new version to test something out, you could instead try using the -`agave-install` command. - -Assuming you want to install Agave version `2.0.15`, you would execute the -following: - -``` -agave-install init 2.0.15 -``` - -This command downloads the executable for `2.0.15` and installs it into a -`.local` directory. You can also look at `agave-install --help` for more -options. - -> **Note** this command only works if you already have the solana cli installed. -> If you do not have the cli installed, refer to -> [install solana cli tools](../../cli/install.md) +The easiest way to upgrade the Solana CLI software is to build the newest +version from source. See the +[build from source](../../cli/install.md#build-from-source) instructions for details. ### Restart -For all install methods, the validator process will need to be restarted before +The validator process will need to be restarted before the newly installed version is in use. Use `agave-validator exit` to restart your validator process. @@ -150,7 +123,7 @@ ledger. Therefore, you should not download a new snapshot any time your validator is offline or experiences an issue. Downloading a snapshot should only be reserved for occasions when you do not have local state. Prolonged downtime or the first install of a new validator are examples of times when you may not -have state locally. 
In other cases such as restarts for upgrades, a snapshot +have state locally. In other cases, such as restarts for upgrades, a snapshot download should be avoided. To avoid downloading a snapshot on restart, add the following flag to the diff --git a/docs/src/operations/guides/validator-failover.md b/docs/src/operations/guides/validator-failover.md index 3647aee0a4887a..e9469ae61f7be5 100644 --- a/docs/src/operations/guides/validator-failover.md +++ b/docs/src/operations/guides/validator-failover.md @@ -6,14 +6,14 @@ pagination_label: "Validator Guides: Node Failover" --- A simple two machine instance failover method is described here, which allows you to: -* Upgrade your validator software with virtually no down time, and +* Upgrade your validator software with virtually no downtime, and * Failover to the secondary instance when your monitoring detects a problem with the primary instance without any safety issues that would otherwise be associated with running two instances of your validator. You will need: * Two non-delinquent validator nodes * Identities that are not associated with a staked vote account on both validators to use when not actively voting -* Validator startup scripts both modified to use symbolic link as the identity +* Validator startup scripts both modified to use a symbolic link as the identity * Validator startup scripts both modified to include staked identity as authorized voter ## Setup @@ -32,7 +32,7 @@ The identity flag and authorized voter flags should be modified on both validato Note that `identity.json` is not a real file but a symbolic link we will create shortly. However, the authorized voter flag does need to point to the staked identity file (your main identity). In this guide, the main identity is renamed to `staked-identity.json` for clarity and simplicity. 
-You can certainly name your main identity file however you'd like; just make sure it is specified as an authorized voter as shown below: +You can certainly name your main identity file however you'd like; make sure it is specified as an authorized voter as shown below: ``` exec /home/sol/bin/agave-validator \ @@ -90,7 +90,7 @@ scp /mnt/ledger/tower-1_9-$(solana-keygen pubkey /home/sol/staked-identity.json) #### Inactive Validator * Set identity to your staked identity (requiring the tower) -* Rewrite symbolic link to reflect this +* Rewrite the symbolic link to reflect this ``` #!/bin/bash diff --git a/docs/src/operations/prerequisites.md b/docs/src/operations/prerequisites.md index b1bba3d4502975..caa3e58cca6ec3 100644 --- a/docs/src/operations/prerequisites.md +++ b/docs/src/operations/prerequisites.md @@ -13,7 +13,6 @@ Here is a list of some of the requirements for being a good operator: - Performant computer hardware and a fast internet connection - You can find a list of [hardware requirements here](./requirements.md) - - Solana helps facilitate data-center server rentals through the [Solana server program](https://solana.foundation/server-program) - Knowledge of the Linux terminal - Linux system administration - Accessing your machine via ssh and scp diff --git a/docs/src/operations/requirements.md b/docs/src/operations/requirements.md index 7300bed5ff0454..63e3afc1799463 100644 --- a/docs/src/operations/requirements.md +++ b/docs/src/operations/requirements.md @@ -23,7 +23,7 @@ The hardware recommendations below are provided as a guide. Operators are encou | **CPU** | - 2.8GHz base clock speed, or faster
- SHA extensions instruction support
- AMD Gen 3 or newer
- Intel Ice Lake or newer
- Higher clock speed is preferable over more cores
- AVX2 instruction support (to use official release binaries, self-compile otherwise)
- Support for AVX512f is helpful
|| | | 12 cores / 24 threads, or more | 16 cores / 32 threads, or more | | **RAM** | Error Correction Code (ECC) memory is suggested
Motherboard with 512GB capacity suggested || -| | 256GB or more| 512 GB or more for **all [account indexes](https://docs.solanalabs.com/operations/setup-an-rpc-node#account-indexing)** | +| | 256GB or more| 512 GB or more for **all [account indexes](https://docs.anza.xyz/operations/setup-an-rpc-node#account-indexing)** | | **Disk** | PCIe Gen3 x4 NVME SSD, or better, on each of:
- **Accounts**: 1TB, or larger. High TBW (Total Bytes Written)
- **Ledger**: 1TB or larger. High TBW suggested
- **Snapshots**: 500GB or larger. High TBW suggested
- **OS**: (Optional) 500GB, or larger. SATA OK

The OS may be installed on the ledger disk, though testing has shown better performance with the ledger on its own disk

Accounts and ledger *can* be stored on the same disk, however due to high IOPS, this is not recommended

The Samsung 970 and 980 Pro series SSDs are popular with the validator community | Consider a larger ledger disk if longer transaction history is required

Accounts and ledger **should not** be stored on the same disk | | **GPUs** | Not necessary at this time
Operators in the validator community do not use GPUs currently | | diff --git a/docs/src/operations/setup-a-validator.md b/docs/src/operations/setup-a-validator.md index 783a1a3465080e..e4d21b53ea9932 100644 --- a/docs/src/operations/setup-a-validator.md +++ b/docs/src/operations/setup-a-validator.md @@ -5,7 +5,7 @@ sidebar_position: 5 --- This is a guide for getting your validator setup on the Solana testnet cluster -for the first time. Testnet is a Solana cluster that is used for performance +for the first time. Testnet is a Solana cluster used for performance testing of the software before the software is used on mainnet. Since testnet is stress tested daily, it is a good cluster to practice validator operations. @@ -31,19 +31,12 @@ locate the terminal program on your _trusted computer_. ## Install The Solana CLI Locally -To create your validator vote account, you need to install the -[Solana command line interface](../cli/index.md) on your local computer. - -You can either use -[Solana's Install Tool](../cli/install.md#use-solanas-install-tool) section from -the within these docs to install the CLI, or alternatively, you can also -[build from source](../cli/install.md#build-from-source). - -> Building from source is a great option for those that want a more secure and -> potentially more performant executable. +Validator operators are required to install the tools included in the Solana CLI using the [installation instructions](../cli/install.md). Once the Solana CLI is installed, you can return to this document once you are -able to run the following command and get an answer on your terminal: +able to run two commands and get an answer on your terminal. 
+ +First, run the following command to verify that the Solana CLI is installed: ``` solana --version @@ -56,14 +49,28 @@ may be higher): solana-cli 1.14.17 (src:b29a37cf; feat:3488713414) ``` -Once you have successfully installed the cli, the next step is to change your +Now, run the following command to verify that the agave-validator binary is +installed: + +``` +agave-validator --version +``` + +You should see an output that looks similar to this (note your version number +may be higher): + +``` +agave-validator 2.3.1 (src:e3eca4c1; feat:3640012085, client:Agave) +``` + +Once you have successfully installed the cli and validator binary, the next step is to change your config so that it is making requests to the `testnet` cluster: ``` solana config set --url https://api.testnet.solana.com ``` -To verify that your config has change run: +To verify that your config has changed, run: ``` solana config get @@ -266,7 +273,7 @@ sudo mount /dev/nvme0n1 /mnt/ledger You will also want to mount the accounts db on a separate hard drive. The process will be similar to the ledger example above. -Assuming you have device at `/dev/nvme1n1`, format the device and verify it +Assuming you have a device at `/dev/nvme1n1`, format the device and verify it exists: ``` @@ -301,7 +308,7 @@ sudo mount /dev/nvme1n1 /mnt/accounts ### Linux -Your system will need to be tuned in order to run properly. Your validator may +Your system will need to be tuned to run properly. Your validator may not start without the settings below. #### **Optimize sysctl knobs** @@ -383,13 +390,11 @@ On the validator server, switch to the `sol` user: su - sol ``` -## Install The Solana CLI on Remote Machine +## Install agave-validator on Remote Machine -Your remote machine will need the Solana CLI installed to run the Agave validator -software. For simplicity, install the cli with user `sol`. 
Refer again to -[Solana's Install Tool](../cli/install.md#use-solanas-install-tool) or -[build from source](../cli/install.md#build-from-source). It is best for -operators to build from source rather than using the pre built binaries. +Your remote machine will need `agave-validator` installed to run the Agave validator +software. For simplicity, install the application with user `sol`. Refer again to +[build from source](../cli/install.md#build-from-source). ## Create A Validator Startup Script @@ -595,3 +600,19 @@ Make sure your ledger is on drive with at least `2TB` of space. This could be a networking/hardware issue, or you may need to get the latest snapshot from another validator node. + +### PoH hashes/second rate is slower than the cluster target + +If you are using `agave-validator` built from source, ensure that you are using a `release` build and not a `debug` build + +Ensure that your machine's CPU base clock speed is 2.8GHz or faster. Use `lscpu` to check your clock speed. `CPU(s) scaling MHz` can cause your clock speed to be underclocked. Some additional tuning: + +Set performance governor +```bash +echo performance | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor +``` + +Force minimum frequency to maximum +```bash +# Example if your maximum GHz is 2.8 +echo 2850000 | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq diff --git a/docs/src/operations/setup-an-rpc-node.md b/docs/src/operations/setup-an-rpc-node.md index 1fbd8202fcdcc5..20449fe2ee81af 100644 --- a/docs/src/operations/setup-an-rpc-node.md +++ b/docs/src/operations/setup-an-rpc-node.md @@ -4,7 +4,10 @@ sidebar_label: Setup an Agave RPC Node sidebar_position: 6 --- -Since a Solana RPC server runs the same process as a consensus validator, first follow the instructions on [how to setup a Solana validator](./setup-a-validator.md) to get started. Note, that you do not need to create a vote account if you are operating an RPC node. 
An RPC node typically does not vote. +Since a Solana RPC server runs the same process as a consensus validator, first follow the instructions on +[how to setup a Solana validator](./setup-a-validator.md) to get started. +Note that you do not need to create a vote account if you are operating an RPC node. +An RPC node typically does not vote. After your validator is running, you can refer to this section for the RPC node specific setup instructions. @@ -50,7 +53,7 @@ exec agave-validator \ ### Solana Bigtable -The Solana blockchain is able to create many transactions per second. Because of the volume of transactions on the chain, it is not practical for an RPC node to store the entire blockchain on the machine. Instead, RPC operators use the `--limit-ledger-size` flag to specify how many blocks to store on the RPC node. If the user of the RPC node needs historical blockchain data then the RPC server will have to access older blocks through a Solana bigtable instance. +The Solana blockchain is able to create many transactions per second. Because of the volume of transactions on the chain, it is not practical for an RPC node to store the entire blockchain on the machine. Instead, RPC operators use the `--limit-ledger-size` flag to specify how many blocks to store on the RPC node. If the user of the RPC node needs historical blockchain data, then the RPC server will have to access older blocks through a Solana bigtable instance. If you are interested in setting up your own bigtable instance, see these docs in the Solana GitHub repository: [solana-labs/solana-bigtable](https://github.com/solana-labs/solana-bigtable) @@ -66,9 +69,10 @@ The identities of the [known validators](./guides/validator-start.md#known-valid ## Examples for other clusters -Additional examples of other Solana cluster specific validator commands can be found on the [Clusters](../clusters/available.md) page. 
+Additional examples of other Solana cluster-specific validator commands can be found on the [Clusters](../clusters/available.md) page. -Keep in mind, you will still need to customize these commands to operate as an RPC node, as well other operator specific configuration settings. +Keep in mind, you will still need to customize these commands to operate as an RPC node, as well as other +operator-specific configuration settings. ## Account indexing @@ -79,7 +83,7 @@ requests that scan the entire account set -- like may perform poorly. If your validator needs to support any of these requests, you can use the `--account-index` parameter to activate one or more in-memory account indexes that significantly improve RPC performance by indexing accounts -by the key field. Currently supports the following parameter values: +by the key field. Currently, it supports the following parameter values: - `program-id`: each account indexed by its owning program; used by [getProgramAccounts](https://solana.com/docs/rpc/http/getprogramaccounts) - `spl-token-mint`: each SPL token account indexed by its token Mint; used by [getTokenAccountsByDelegate](https://solana.com/docs/rpc/http/gettokenaccountsbydelegate), and [getTokenLargestAccounts](https://solana.com/docs/rpc/http/gettokenlargestaccounts) diff --git a/docs/src/operations/validator-initiatives.md b/docs/src/operations/validator-initiatives.md deleted file mode 100644 index 7a3f061c4ebc42..00000000000000 --- a/docs/src/operations/validator-initiatives.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Solana Validator Initiatives -sidebar_label: Validator Initiatives -sidebar_position: 4 ---- - -There are a number of initiatives that may help operators get started or grow their delegation. All of these initiatives are completely optional. All Solana clusters are permissionless and an operator can join at any time. 
- - -## Solana Foundation Delegation Program - -The Solana Foundation helps facilitate the growth of the consensus validator network by running a SOL delegation program. The program is open to new applicants. You can find out more information [here](https://solana.org/delegation-program) - -## Tour De Sun 22 - -The Tour De Sun 22 program is closed and no longer accepting applications. Please see the [End of Tour De Sun 22 blog post](https://solana.com/news/solana-foundation-announces-end-of-tour-de-sun-22) for more information. - -## Solana Foundation Server Program - -Separately from the delegation program, The Solana Foundation offers a server program that provides servers in various data-centers all over the world. If you would like to run a consensus validator or RPC node, you may use this program to rent bare metal servers in various data-centers. The servers meet or exceed the [Solana validator hardware specs](./requirements.md#hardware-recommendations). No long-term lease commitments are required. To find out more, visit the [Solana server recommendations page](https://solana.inflect.com/). - -## Stake Pools - -The Solana ecosystem operates various stake pools. These stake pools delegate stake to validators using various delegation strategies. To learn more about stake pools and their delegation strategies, visit the [Solana Stake Pool documentation](https://spl.solana.com/stake-pool). diff --git a/docs/src/validator/geyser.md b/docs/src/validator/geyser.md index efea2e18e30269..cd9085c0dcff40 100644 --- a/docs/src/validator/geyser.md +++ b/docs/src/validator/geyser.md @@ -168,6 +168,91 @@ The `slot` points to the slot the transaction is executed at. For more details, please refer to the Rust documentation in [`agave-geyser-plugin-interface`]. +# Timing Relationships of Various Plugin Callbacks. + +Account update via update_account: As mentioned previously when is_startup is +false, the account is updated during transaction processing. 
The account update +has information about the transaction causing the update in the `txn` field. +Note, when account update is sent during start up, the txn field is None as +there is no transaction. + +``` +pub struct ReplicaAccountInfoV3<'a> { + /// The Pubkey for the account + pub pubkey: &'a [u8], + + /// The lamports for the account + pub lamports: u64, + + /// The Pubkey of the owner program account + pub owner: &'a [u8], + + /// This account's data contains a loaded program (and is now read-only) + pub executable: bool, + + /// The epoch at which this account will next owe rent + pub rent_epoch: u64, + + /// The data held in this account. + pub data: &'a [u8], + + /// A global monotonically increasing atomic number, which can be used + /// to tell the order of the account update. For example, when an + /// account is updated in the same slot multiple times, the update + /// with higher write_version should supersede the one with lower + /// write_version. + pub write_version: u64, + + /// Reference to transaction causing this account modification + pub txn: Option<&'a SanitizedTransaction>, +} +``` + +The updates are sent serially for different accounts via update_slot_status +in the transaction for a slot. After the accounts notifications are sent, the +SlotStatus::Processed event is sent. + +Starting with Agave 3.0, transaction notifications are sent before +SlotStatus::Processed. In prior Agave version, even though SlotStatus::Processed +is sent logically after the transaction events, because there are intermediate +threads emitting the notitications to the plugin, the plugin can see the +transaction notifications and the SlotStatus::Processed for a slot in either +order. + +Within a block, transactions are ordered with transaction index. Transactions +within a block are processed and notified in parallel. A plugin should use the +transaction index to determine their relative order. 
+ +A plugin can use the notify_block_metadata to know the +executed_transaction_count for a given slot in the following structure: + +``` +/// Extending ReplicaBlockInfo by sending RewardsAndNumPartitions. +#[derive(Clone, Debug)] +#[repr(C)] +pub struct ReplicaBlockInfoV4<'a> { + pub parent_slot: Slot, + pub parent_blockhash: &'a str, + pub slot: Slot, + pub blockhash: &'a str, + pub rewards: &'a RewardsAndNumPartitions, + pub block_time: Option, + pub block_height: Option, + pub executed_transaction_count: u64, + pub entry_count: u64, +} +``` + +The plugin can associate accounts with transactions via the txn field in the +ReplicaAccountInfoV3 structure. It can also use ReplicaTransactionInfoV2 in +the notify_transaction callback to get the account addresses. + +The SlotStatus::Confirmed and SlotStatus::Processed events can reach the plugin +in any order as they are sent asynchronous to each other. A plugin should wait +for both events to confirm they are processed and confirmed. + +The SlotStatus::Rooted is sent after SlotStatus::Processed. 
+ ## Example PostgreSQL Plugin The [`solana-accountsdb-plugin-postgres`] repository implements a plugin storing diff --git a/dos/Cargo.toml b/dos/Cargo.toml index dce537a837b429..4573466f7e22a8 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -27,12 +27,11 @@ solana-bench-tps = { workspace = true } solana-client = { workspace = true } solana-connection-cache = { workspace = true } solana-core = { workspace = true } -solana-faucet = { workspace = true } solana-gossip = { workspace = true } solana-hash = { workspace = true } solana-instruction = { workspace = true } solana-keypair = { workspace = true } -solana-logger = "=2.3.1" +solana-logger = "=3.0.0" solana-measure = { workspace = true } solana-message = { workspace = true } solana-net-utils = { workspace = true } @@ -54,5 +53,6 @@ solana-version = { workspace = true } [dev-dependencies] solana-core = { workspace = true, features = ["dev-context-only-utils"] } +solana-faucet = { workspace = true, features = ["dev-context-only-utils"] } solana-local-cluster = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/dos/src/cli.rs b/dos/src/cli.rs index b44e5a4e1add3c..07635e92c8b9c2 100644 --- a/dos/src/cli.rs +++ b/dos/src/cli.rs @@ -48,7 +48,8 @@ pub struct DosClientParameters { #[clap( long, conflicts_with("skip-gossip"), - help = "The shred version to use for gossip discovery. If not provided, will be discovered from the network" + help = "The shred version to use for gossip discovery. 
If not provided, will be \ + discovered from the network" )] pub shred_version: Option, @@ -175,7 +176,10 @@ fn validate_input(params: &DosClientParameters) { if params.data_type != DataType::Transaction { let tp = ¶ms.transaction_params; if tp.valid_blockhash || tp.valid_signatures || tp.unique_transactions { - eprintln!("Arguments valid-blockhash, valid-sign, unique-transactions are ignored if data-type != transaction"); + eprintln!( + "Arguments valid-blockhash, valid-sign, unique-transactions are ignored if \ + data-type != transaction" + ); exit(1); } } diff --git a/dos/src/main.rs b/dos/src/main.rs index 0dd9b9dfbaf055..7ee06b97658f61 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -438,9 +438,9 @@ fn get_target( } else { info!("************ NODE ***********"); for node in nodes { - info!("{:?}", node); + info!("{node:?}"); } - info!("ADDR = {}", entrypoint_addr); + info!("ADDR = {entrypoint_addr}"); for node in nodes { if node.gossip() == Some(entrypoint_addr) { @@ -652,7 +652,7 @@ fn run_dos( && params.transaction_params.unique_transactions { let (_, target_addr) = target.expect("should have target"); - info!("Targeting {}", target_addr); + info!("Targeting {target_addr}"); run_dos_transactions( target_addr, iterations, @@ -664,7 +664,7 @@ fn run_dos( ); } else { let (target_id, target_addr) = target.expect("should have target"); - info!("Targeting {}", target_addr); + info!("Targeting {target_addr}"); let mut data = match params.data_type { DataType::RepairHighest => { let slot = 100; @@ -700,7 +700,7 @@ fn run_dos( } DataType::Transaction => { let tp = params.transaction_params; - info!("{:?}", tp); + info!("{tp:?}"); let valid_blockhash = tp.valid_blockhash; let payers: Vec> = @@ -720,7 +720,7 @@ fn run_dos( let mut transaction_generator = TransactionGenerator::new(tp); let tx = transaction_generator.generate(payer, keypairs_chunk, client.as_ref()); - info!("{:?}", tx); + info!("{tx:?}"); bincode::serialize(&tx).unwrap() } _ => panic!("Unsupported 
data_type detected"), @@ -768,7 +768,7 @@ fn main() { cmd_params.shred_version = Some( solana_net_utils::get_cluster_shred_version(&cmd_params.entrypoint_addr) .unwrap_or_else(|err| { - eprintln!("Failed to get shred version: {}", err); + eprintln!("Failed to get shred version: {err}"); exit(1); }), ); @@ -831,7 +831,7 @@ pub mod test { use { super::*, solana_core::validator::ValidatorConfig, - solana_faucet::faucet::run_local_faucet, + solana_faucet::faucet::run_local_faucet_with_unique_port_for_tests, solana_local_cluster::{ cluster::Cluster, local_cluster::{ClusterConfig, LocalCluster}, @@ -1086,7 +1086,7 @@ pub mod test { // 1. Create faucet thread let faucet_keypair = Keypair::new(); let faucet_pubkey = faucet_keypair.pubkey(); - let faucet_addr = run_local_faucet(faucet_keypair, None); + let faucet_addr = run_local_faucet_with_unique_port_for_tests(faucet_keypair); let mut validator_config = ValidatorConfig::default_for_test(); validator_config.rpc_config = JsonRpcConfig { faucet_addr: Some(faucet_addr), @@ -1095,7 +1095,6 @@ pub mod test { // 2. 
Create a local cluster which is aware of faucet let num_nodes = 1; - let native_instruction_processors = vec![]; let cluster = LocalCluster::new( &mut ClusterConfig { node_stakes: vec![999_990; num_nodes], @@ -1110,7 +1109,6 @@ pub mod test { }, num_nodes, ), - native_instruction_processors, ..ClusterConfig::default() }, SocketAddrSpace::Unspecified, diff --git a/download-utils/src/lib.rs b/download-utils/src/lib.rs index 3f01d2eff75a72..a1e3c749acfbd8 100644 --- a/download-utils/src/lib.rs +++ b/download-utils/src/lib.rs @@ -106,7 +106,7 @@ pub fn download_snapshot_archive( progress_notify_callback, ) { Ok(()) => return Ok(()), - Err(err) => info!("{}", err), + Err(err) => info!("{err}"), } } Err(format!( diff --git a/entry/src/entry.rs b/entry/src/entry.rs index 11cd27ccf30532..ba0c7fa068d740 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -49,7 +49,7 @@ pub fn init_poh() { fn init(name: &OsStr) { static INIT_HOOK: Once = Once::new(); - info!("Loading {:?}", name); + info!("Loading {name:?}"); INIT_HOOK.call_once(|| { let path; let lib_name = if let Some(perf_libs_path) = solana_perf::perf_libs::locate_perf_libs() { @@ -887,10 +887,8 @@ impl EntrySlice for [Entry] { if entry.is_tick() { if *tick_hash_count != hashes_per_tick { warn!( - "invalid tick hash count!: entry: {:#?}, tick_hash_count: {}, hashes_per_tick: {}", - entry, - tick_hash_count, - hashes_per_tick + "invalid tick hash count!: entry: {entry:#?}, tick_hash_count: \ + {tick_hash_count}, hashes_per_tick: {hashes_per_tick}" ); return false; } @@ -1406,7 +1404,7 @@ mod tests { for _ in 0..100 { let mut time = Measure::start("ticks"); let num_ticks = thread_rng().gen_range(1..100); - info!("create {} ticks:", num_ticks); + info!("create {num_ticks} ticks:"); let mut entries = create_random_ticks(num_ticks, 100, Hash::default()); time.stop(); @@ -1417,12 +1415,12 @@ mod tests { entries[modify_idx].hash = hash(&[1, 2, 3]); } - info!("done.. {}", time); + info!("done.. 
{time}"); let mut time = Measure::start("poh"); let res = entries.verify(&Hash::default(), &thread_pool_for_tests()); assert_eq!(res, !modified); time.stop(); - info!("{} {}", time, res); + info!("{time} {res}"); } } diff --git a/entry/src/poh.rs b/entry/src/poh.rs index 0bb96307ae9bcc..7008a215bbe522 100644 --- a/entry/src/poh.rs +++ b/entry/src/poh.rs @@ -12,7 +12,7 @@ pub struct Poh { pub hash: Hash, num_hashes: u64, hashes_per_tick: u64, - remaining_hashes: u64, + remaining_hashes_until_tick: u64, tick_number: u64, slot_start_time: Instant, } @@ -36,7 +36,7 @@ impl Poh { hash, num_hashes: 0, hashes_per_tick, - remaining_hashes: hashes_per_tick, + remaining_hashes_until_tick: hashes_per_tick, tick_number, slot_start_time: now, } @@ -62,27 +62,27 @@ impl Poh { /// Return `true` if the caller needs to `tick()` next, i.e. if the /// remaining_hashes is 1. pub fn hash(&mut self, max_num_hashes: u64) -> bool { - let num_hashes = std::cmp::min(self.remaining_hashes - 1, max_num_hashes); + let num_hashes = std::cmp::min(self.remaining_hashes_until_tick - 1, max_num_hashes); for _ in 0..num_hashes { self.hash = hash(self.hash.as_ref()); } self.num_hashes += num_hashes; - self.remaining_hashes -= num_hashes; + self.remaining_hashes_until_tick -= num_hashes; - assert!(self.remaining_hashes > 0); - self.remaining_hashes == 1 + assert!(self.remaining_hashes_until_tick > 0); + self.remaining_hashes_until_tick == 1 } pub fn record(&mut self, mixin: Hash) -> Option { - if self.remaining_hashes == 1 { + if self.remaining_hashes_until_tick == 1 { return None; // Caller needs to `tick()` first } self.hash = hashv(&[self.hash.as_ref(), mixin.as_ref()]); let num_hashes = self.num_hashes + 1; self.num_hashes = 0; - self.remaining_hashes -= 1; + self.remaining_hashes_until_tick -= 1; Some(PohEntry { num_hashes, @@ -98,7 +98,7 @@ impl Poh { let num_mixins = mixins.len() as u64; debug_assert_ne!(num_mixins, 0, "mixins.len() == 0"); - if self.remaining_hashes < num_mixins + 1 { + if 
self.remaining_hashes_until_tick < num_mixins + 1 { return false; // Not enough hashes remaining to record all mixins } @@ -120,7 +120,7 @@ impl Poh { })); self.num_hashes = 0; - self.remaining_hashes -= num_mixins; + self.remaining_hashes_until_tick -= num_mixins; true } @@ -128,16 +128,16 @@ impl Poh { pub fn tick(&mut self) -> Option { self.hash = hash(self.hash.as_ref()); self.num_hashes += 1; - self.remaining_hashes -= 1; + self.remaining_hashes_until_tick -= 1; // If we are in low power mode then always generate a tick. // Otherwise only tick if there are no remaining hashes - if self.hashes_per_tick != LOW_POWER_MODE && self.remaining_hashes != 0 { + if self.hashes_per_tick != LOW_POWER_MODE && self.remaining_hashes_until_tick != 0 { return None; } let num_hashes = self.num_hashes; - self.remaining_hashes = self.hashes_per_tick; + self.remaining_hashes_until_tick = self.hashes_per_tick; self.num_hashes = 0; self.tick_number += 1; Some(PohEntry { @@ -145,10 +145,19 @@ impl Poh { hash: self.hash, }) } + + pub fn remaining_hashes_in_slot(&self, ticks_per_slot: u64) -> u64 { + // ticks_per_slot must be a power of two so we can use a bitmask + debug_assert!(ticks_per_slot.is_power_of_two() && ticks_per_slot > 0); + ticks_per_slot + .saturating_sub((self.tick_number & (ticks_per_slot.wrapping_sub(1))).wrapping_add(1)) + .wrapping_mul(self.hashes_per_tick) + .wrapping_add(self.remaining_hashes_until_tick) + } } pub fn compute_hash_time(hashes_sample_size: u64) -> Duration { - info!("Running {} hashes...", hashes_sample_size); + info!("Running {hashes_sample_size} hashes..."); let mut v = Hash::default(); let start = Instant::now(); for _ in 0..hashes_sample_size { @@ -328,29 +337,32 @@ mod tests { #[test] fn test_poh_tick() { let mut poh = Poh::new(Hash::default(), Some(2)); - assert_eq!(poh.remaining_hashes, 2); + assert_eq!(poh.remaining_hashes_until_tick, 2); assert!(poh.tick().is_none()); - assert_eq!(poh.remaining_hashes, 1); + 
assert_eq!(poh.remaining_hashes_until_tick, 1); assert_matches!(poh.tick(), Some(PohEntry { num_hashes: 2, .. })); - assert_eq!(poh.remaining_hashes, 2); // Ready for the next tick + assert_eq!(poh.remaining_hashes_until_tick, 2); // Ready for the next tick } #[test] fn test_poh_tick_large_batch() { let mut poh = Poh::new(Hash::default(), Some(2)); - assert_eq!(poh.remaining_hashes, 2); + assert_eq!(poh.remaining_hashes_until_tick, 2); assert!(poh.hash(1_000_000)); // Stop hashing before the next tick - assert_eq!(poh.remaining_hashes, 1); + assert_eq!(poh.remaining_hashes_until_tick, 1); assert!(poh.hash(1_000_000)); // Does nothing... - assert_eq!(poh.remaining_hashes, 1); + assert_eq!(poh.remaining_hashes_until_tick, 1); + assert_eq!(poh.remaining_hashes_in_slot(2), 3); poh.tick(); - assert_eq!(poh.remaining_hashes, 2); // Ready for the next tick + assert_eq!(poh.remaining_hashes_until_tick, 2); // Ready for the next tick + assert_eq!(poh.remaining_hashes_in_slot(2), 2); } #[test] fn test_poh_tick_too_soon() { let mut poh = Poh::new(Hash::default(), Some(2)); - assert_eq!(poh.remaining_hashes, 2); + assert_eq!(poh.remaining_hashes_until_tick, 2); + assert_eq!(poh.remaining_hashes_in_slot(2), 4); assert!(poh.tick().is_none()); } @@ -358,14 +370,16 @@ mod tests { fn test_poh_record_not_permitted_at_final_hash() { let mut poh = Poh::new(Hash::default(), Some(10)); assert!(poh.hash(9)); - assert_eq!(poh.remaining_hashes, 1); + assert_eq!(poh.remaining_hashes_until_tick, 1); + assert_eq!(poh.remaining_hashes_in_slot(2), 11); assert!(poh.record(Hash::default()).is_none()); // <-- record() rejected to avoid exceeding hashes_per_tick assert_matches!(poh.tick(), Some(PohEntry { num_hashes: 10, .. })); assert_matches!( poh.record(Hash::default()), Some(PohEntry { num_hashes: 1, .. 
}) // <-- record() ok ); - assert_eq!(poh.remaining_hashes, 9); + assert_eq!(poh.remaining_hashes_until_tick, 9); + assert_eq!(poh.remaining_hashes_in_slot(2), 9); } #[test] @@ -380,7 +394,8 @@ mod tests { assert_eq!(entries[0].num_hashes, 5); assert_eq!(entries[1].num_hashes, 1); assert_eq!(entries[2].num_hashes, 1); - assert!(poh.remaining_hashes == 3); + assert_eq!(poh.remaining_hashes_until_tick, 3); + assert_eq!(poh.remaining_hashes_in_slot(2), 13); // Cannot record more than number of remaining hashes assert!(!poh.record_batches(&dummy_hashes[..4], &mut entries,)); @@ -393,6 +408,7 @@ mod tests { assert_eq!(entries.len(), 2); assert_eq!(entries[0].num_hashes, 1); assert_eq!(entries[1].num_hashes, 1); - assert!(poh.remaining_hashes == 1); + assert_eq!(poh.remaining_hashes_until_tick, 1); + assert_eq!(poh.remaining_hashes_in_slot(2), 11); } } diff --git a/faucet/Cargo.toml b/faucet/Cargo.toml index 6e9f1ac247e0df..3eb122517a76e3 100644 --- a/faucet/Cargo.toml +++ b/faucet/Cargo.toml @@ -20,6 +20,9 @@ name = "solana_faucet" name = "solana-faucet" path = "src/bin/faucet.rs" +[features] +dev-context-only-utils = [] + [dependencies] bincode = { workspace = true } clap = { workspace = true } @@ -29,20 +32,24 @@ serde = { workspace = true } serde_derive = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } -solana-hash = "=2.3.0" -solana-instruction = "=2.3.0" -solana-keypair = "=2.2.1" -solana-logger = "=2.3.1" -solana-message = "=2.4.0" +solana-cli-output = { workspace = true } +solana-hash = "=3.0.0" +solana-instruction = "=3.0.0" +solana-keypair = "=3.0.1" +solana-logger = "=3.0.0" +solana-message = "=3.0.1" solana-metrics = { workspace = true } -solana-native-token = "=2.2.2" -solana-packet = "=2.2.1" -solana-pubkey = { version = "=2.4.0", features = ["rand"] } -solana-signer = "=2.2.1" -solana-system-interface = "=1.0" -solana-system-transaction = "=2.2.1" -solana-transaction = "=2.2.3" +solana-net-utils = { 
workspace = true } +solana-packet = "=3.0.0" +solana-pubkey = { version = "=3.0.0", features = ["rand"] } +solana-signer = "=3.0.0" +solana-system-interface = "=2.0" +solana-system-transaction = "=3.0.0" +solana-transaction = "=3.0.1" solana-version = { workspace = true } -spl-memo-interface = { version = "=1.0.0" } +spl-memo-interface = { version = "=2.0.0" } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } + +[dev-dependencies] +solana-faucet = { path = ".", features = ["dev-context-only-utils"] } diff --git a/faucet/src/bin/faucet.rs b/faucet/src/bin/faucet.rs index acfe578de9f1a7..35f356fdcf6ad6 100644 --- a/faucet/src/bin/faucet.rs +++ b/faucet/src/bin/faucet.rs @@ -63,8 +63,8 @@ async fn main() { .takes_value(true) .multiple(true) .help( - "Allow requests from a particular IP address without request limit; \ - recipient address will be used to check request limits instead", + "Allow requests from a particular IP address without request limit; recipient \ + address will be used to check request limits instead", ), ) .get_matches(); diff --git a/faucet/src/faucet.rs b/faucet/src/faucet.rs index 86a36e5624433a..41395663a01c3c 100644 --- a/faucet/src/faucet.rs +++ b/faucet/src/faucet.rs @@ -9,12 +9,12 @@ use { crossbeam_channel::{unbounded, Sender}, log::*, serde_derive::{Deserialize, Serialize}, + solana_cli_output::display::build_balance_message, solana_hash::Hash, solana_instruction::Instruction, solana_keypair::Keypair, solana_message::Message, solana_metrics::datapoint_info, - solana_native_token::lamports_to_sol, solana_packet::PACKET_DATA_SIZE, solana_pubkey::Pubkey, solana_signer::Signer, @@ -67,10 +67,10 @@ pub enum FaucetError { NoDataReceived, #[error("request too large; req: ◎{0}, cap: ◎{1}")] - PerRequestCapExceeded(f64, f64), + PerRequestCapExceeded(String, String), #[error("limit reached; req: ◎{0}, to: {1}, current: ◎{2}, cap: ◎{3}")] - PerTimeCapExceeded(f64, String, f64, f64), + PerTimeCapExceeded(String, 
String, String, String), } #[derive(Serialize, Deserialize, Debug, Clone, Copy)] @@ -124,10 +124,10 @@ impl Faucet { if let Some((per_request_cap, per_time_cap)) = per_request_cap.zip(per_time_cap) { if per_time_cap < per_request_cap { warn!( - "per_time_cap {} SOL < per_request_cap {} SOL; \ - maximum single requests will fail", - lamports_to_sol(per_time_cap), - lamports_to_sol(per_request_cap), + "per_time_cap {} SOL < per_request_cap {} SOL; maximum single requests will \ + fail", + build_balance_message(per_time_cap, false, false), + build_balance_message(per_request_cap, false, false), ); } } @@ -152,10 +152,10 @@ impl Faucet { if let Some(cap) = self.per_time_cap { if new_total > cap { return Err(FaucetError::PerTimeCapExceeded( - lamports_to_sol(request_amount), + build_balance_message(request_amount, false, false), to.to_string(), - lamports_to_sol(new_total), - lamports_to_sol(cap), + build_balance_message(new_total, false, false), + build_balance_message(cap, false, false), )); } } @@ -176,7 +176,7 @@ impl Faucet { req: FaucetRequest, ip: IpAddr, ) -> Result { - trace!("build_airdrop_transaction: {:?}", req); + trace!("build_airdrop_transaction: {req:?}"); match req { FaucetRequest::GetAirdrop { lamports, @@ -186,7 +186,7 @@ impl Faucet { let mint_pubkey = self.faucet_keypair.pubkey(); info!( "Requesting airdrop of {} SOL to {:?}", - lamports_to_sol(lamports), + build_balance_message(lamports, false, false), to ); @@ -195,8 +195,8 @@ impl Faucet { let memo = format!( "{}", FaucetError::PerRequestCapExceeded( - lamports_to_sol(lamports), - lamports_to_sol(cap), + build_balance_message(lamports, false, false), + build_balance_message(cap, false, false), ) ); let memo_instruction = Instruction { @@ -235,7 +235,7 @@ impl Faucet { ) -> Result, FaucetError> { let req: FaucetRequest = deserialize(bytes)?; - info!("Airdrop transaction requested...{:?}", req); + info!("Airdrop transaction requested...{req:?}"); let res = self.build_airdrop_transaction(req, ip); 
match res { Ok(tx) => { @@ -245,7 +245,7 @@ impl Faucet { tx } FaucetTransaction::Memo((tx, memo)) => { - warn!("Memo transaction returned: {}", memo); + warn!("Memo transaction returned: {memo}"); tx } }; @@ -258,7 +258,7 @@ impl Faucet { Ok(response_vec_with_length) } Err(err) => { - warn!("Airdrop transaction failed: {}", err); + warn!("Airdrop transaction failed: {err}"); Err(err) } } @@ -278,8 +278,8 @@ pub fn request_airdrop_transaction( blockhash: Hash, ) -> Result { info!( - "request_airdrop_transaction: faucet_addr={} id={} lamports={} blockhash={}", - faucet_addr, id, lamports, blockhash + "request_airdrop_transaction: faucet_addr={faucet_addr} id={id} lamports={lamports} \ + blockhash={blockhash}" ); let mut stream = TcpStream::connect_timeout(faucet_addr, Duration::new(3, 0))?; @@ -295,10 +295,7 @@ pub fn request_airdrop_transaction( // Read length of transaction let mut buffer = [0; 2]; stream.read_exact(&mut buffer).map_err(|err| { - info!( - "request_airdrop_transaction: buffer length read_exact error: {:?}", - err - ); + info!("request_airdrop_transaction: buffer length read_exact error: {err:?}"); err })?; let transaction_length = u16::from_le_bytes(buffer) as usize; @@ -311,10 +308,7 @@ pub fn request_airdrop_transaction( // Read the transaction let mut buffer = vec![0; transaction_length]; stream.read_exact(&mut buffer).map_err(|err| { - info!( - "request_airdrop_transaction: buffer read_exact error: {:?}", - err - ); + info!("request_airdrop_transaction: buffer read_exact error: {err:?}"); err })?; @@ -322,6 +316,7 @@ pub fn request_airdrop_transaction( Ok(transaction) } +#[deprecated(since = "3.1.0", note = "use `run_local_faucet_with_config` instead")] pub fn run_local_faucet_with_port( faucet_keypair: Keypair, sender: Sender>, @@ -343,9 +338,84 @@ pub fn run_local_faucet_with_port( }); } +/// Configuration for running a local faucet server. 
+#[cfg(feature = "dev-context-only-utils")] +pub struct LocalFaucetConfig { + pub keypair: Keypair, + pub address: Ipv4Addr, + pub port: u16, + pub time_input: Option, + pub per_time_cap: Option, + pub per_request_cap: Option, +} + +/// Runs a local faucet server with the specified configuration. +/// +/// # Arguments +/// * `sender` - Channel to report the bound socket address or startup errors +/// * `config` - Faucet configuration (use `config.port = 0` for random open port) +#[cfg(feature = "dev-context-only-utils")] +pub fn run_local_faucet_with_config( + sender: Sender>, + config: LocalFaucetConfig, +) { + thread::spawn(move || { + let faucet_addr = socketaddr!(config.address, config.port); + let faucet = Arc::new(Mutex::new(Faucet::new( + config.keypair, + config.time_input, + config.per_time_cap, + config.per_request_cap, + ))); + let runtime = Runtime::new().unwrap(); + runtime.block_on(run_faucet(faucet, faucet_addr, Some(sender))); + }); +} + +/// For integration tests and benchmarks. +/// +/// Listens on `LOCALHOST` with a random open port (unless port is provided) and reports to Sender. +#[cfg(feature = "dev-context-only-utils")] +pub fn run_local_faucet_for_tests( + keypair: Keypair, + per_time_cap: Option, + port: u16, +) -> SocketAddr { + let (sender, receiver) = unbounded(); + run_local_faucet_with_config( + sender, + LocalFaucetConfig { + keypair, + address: Ipv4Addr::LOCALHOST, + port, + time_input: None, + per_time_cap, + per_request_cap: None, + }, + ); + receiver + .recv() + .expect("run_local_faucet_for_tests") + .expect("faucet_addr") +} + +/// For tests only. +/// +/// Listens on `LOCALHOST` with unique, non-overlapping port and reports to Sender. 
+#[cfg(feature = "dev-context-only-utils")] +pub fn run_local_faucet_with_unique_port_for_tests(keypair: Keypair) -> SocketAddr { + run_local_faucet_for_tests( + keypair, + None, /* per_time_cap */ + solana_net_utils::sockets::unique_port_range_for_tests(1).start, + ) +} + // For integration tests. Listens on random open port and reports port to Sender. +#[deprecated(since = "3.1.0", note = "use `run_local_faucet_for_tests` instead")] pub fn run_local_faucet(faucet_keypair: Keypair, per_time_cap: Option) -> SocketAddr { let (sender, receiver) = unbounded(); + #[allow(deprecated)] run_local_faucet_with_port(faucet_keypair, sender, None, per_time_cap, None, 0); receiver .recv() @@ -360,25 +430,29 @@ pub async fn run_faucet( ) { let listener = TcpListener::bind(&faucet_addr).await; if let Some(sender) = sender { - sender.send( - listener.as_ref().map(|listener| listener.local_addr().unwrap()) - .map_err(|err| { - format!( - "Unable to bind faucet to {faucet_addr:?}, check the address is not already in use: {err}" - ) - }) + sender + .send( + listener + .as_ref() + .map(|listener| listener.local_addr().unwrap()) + .map_err(|err| { + format!( + "Unable to bind faucet to {faucet_addr:?}, check the address is not \ + already in use: {err}" + ) + }), ) .unwrap(); } let listener = match listener { Err(err) => { - error!("Faucet failed to start: {}", err); + error!("Faucet failed to start: {err}"); return; } Ok(listener) => listener, }; - info!("Faucet started. Listening on: {}", faucet_addr); + info!("Faucet started. 
Listening on: {faucet_addr}"); info!( "Faucet account address: {}", faucet.lock().unwrap().faucet_keypair.pubkey() @@ -390,11 +464,11 @@ pub async fn run_faucet( Ok((stream, _)) => { tokio::spawn(async move { if let Err(e) = process(stream, faucet).await { - info!("failed to process request; error = {:?}", e); + info!("failed to process request; error = {e:?}"); } }); } - Err(e) => debug!("failed to accept socket; error = {:?}", e), + Err(e) => debug!("failed to accept socket; error = {e:?}"), } } } @@ -413,7 +487,7 @@ async fn process( .unwrap() as usize ]; while stream.read_exact(&mut request).await.is_ok() { - trace!("{:?}", request); + trace!("{request:?}"); let response = { match stream.peer_addr() { @@ -423,15 +497,15 @@ async fn process( } Ok(peer_addr) => { let ip = peer_addr.ip(); - info!("Request IP: {:?}", ip); + info!("Request IP: {ip:?}"); match faucet.lock().unwrap().process_faucet_request(&request, ip) { Ok(response_bytes) => { - trace!("Airdrop response_bytes: {:?}", response_bytes); + trace!("Airdrop response_bytes: {response_bytes:?}"); response_bytes } Err(e) => { - info!("Error in request: {}", e); + info!("Error in request: {e}"); ERROR_RESPONSE.to_vec() } } diff --git a/faucet/tests/local-faucet.rs b/faucet/tests/local-faucet.rs index dc86d0c665ea61..61841ae74fcb3e 100644 --- a/faucet/tests/local-faucet.rs +++ b/faucet/tests/local-faucet.rs @@ -1,5 +1,7 @@ use { - solana_faucet::faucet::{request_airdrop_transaction, run_local_faucet}, + solana_faucet::faucet::{ + request_airdrop_transaction, run_local_faucet_with_unique_port_for_tests, + }, solana_hash::Hash, solana_keypair::Keypair, solana_message::Message, @@ -17,8 +19,7 @@ fn test_local_faucet() { let create_instruction = transfer(&keypair.pubkey(), &to, lamports); let message = Message::new(&[create_instruction], Some(&keypair.pubkey())); let expected_tx = Transaction::new(&[&keypair], message, blockhash); - - let faucet_addr = run_local_faucet(keypair, None); + let faucet_addr = 
run_local_faucet_with_unique_port_for_tests(keypair); let result = request_airdrop_transaction(&faucet_addr, &to, lamports, blockhash); assert_eq!(expected_tx, result.unwrap()); diff --git a/feature-set/src/lib.rs b/feature-set/src/lib.rs index bd26f9b52033f0..f4d6967b711fae 100644 --- a/feature-set/src/lib.rs +++ b/feature-set/src/lib.rs @@ -104,10 +104,9 @@ impl FeatureSet { SVMFeatureSet { move_precompile_verification_to_svm: self .is_active(&move_precompile_verification_to_svm::id()), - remove_accounts_executable_flag_checks: self - .is_active(&remove_accounts_executable_flag_checks::id()), stricter_abi_and_runtime_constraints: self .is_active(&stricter_abi_and_runtime_constraints::id()), + account_data_direct_mapping: self.is_active(&account_data_direct_mapping::id()), enable_bpf_loader_set_authority_checked_ix: self .is_active(&enable_bpf_loader_set_authority_checked_ix::id()), enable_loader_v4: self.is_active(&enable_loader_v4::id()), @@ -149,7 +148,6 @@ impl FeatureSet { .is_active(&simplify_alt_bn128_syscall_error_codes::id()), fix_alt_bn128_multiplication_input_length: self .is_active(&fix_alt_bn128_multiplication_input_length::id()), - loosen_cpi_size_restriction: self.is_active(&loosen_cpi_size_restriction::id()), increase_tx_account_lock_limit: self.is_active(&increase_tx_account_lock_limit::id()), enable_extend_program_checked: self.is_active(&enable_extend_program_checked::id()), formalize_loaded_transaction_data_size: self @@ -159,6 +157,8 @@ impl FeatureSet { reenable_zk_elgamal_proof_program: self .is_active(&reenable_zk_elgamal_proof_program::id()), raise_cpi_nesting_limit_to_8: self.is_active(&raise_cpi_nesting_limit_to_8::id()), + provide_instruction_data_offset_in_vm_r2: self + .is_active(&provide_instruction_data_offset_in_vm_r2::id()), } } } @@ -754,7 +754,11 @@ pub mod apply_cost_tracker_during_replay { } pub mod stricter_abi_and_runtime_constraints { - solana_pubkey::declare_id!("C37iaPi6VE4CZDueU1vL8y6pGp5i8amAbEsF31xzz723"); + 
solana_pubkey::declare_id!("CxeBn9PVeeXbmjbNwLv6U4C6svNxnC4JX6mfkvgeMocM"); +} + +pub mod account_data_direct_mapping { + solana_pubkey::declare_id!("9s3RKimHWS44rJcJ9P1rwCmn2TvMqtZQBmz815ZUUHqJ"); } pub mod add_set_tx_loaded_accounts_data_size_instruction { @@ -1014,7 +1018,7 @@ pub mod enable_sbpf_v2_deployment_and_execution { } pub mod enable_sbpf_v3_deployment_and_execution { - solana_pubkey::declare_id!("GJav1vwg2etvSWraPT96QvYuQJswJTJwtcyARrvkhuV9"); + solana_pubkey::declare_id!("BUwGLeF3Lxyfv1J1wY8biFHBB2hrk2QhbNftQf3VV3cC"); } pub mod remove_accounts_executable_flag_checks { @@ -1110,7 +1114,7 @@ pub mod disable_zk_elgamal_proof_program { } pub mod reenable_zk_elgamal_proof_program { - solana_pubkey::declare_id!("zkemPXcuM3G4wpMDZ36Cpw34EjUpvm1nuioiSGbGZPR"); + solana_pubkey::declare_id!("zkesAyFB19sTkX8i9ReoKaMNDA4YNTPYJpZKPDt7FMW"); } pub mod raise_block_limits_to_100m { @@ -1125,247 +1129,931 @@ pub mod raise_cpi_nesting_limit_to_8 { solana_pubkey::declare_id!("6TkHkRmP7JZy1fdM6fg5uXn76wChQBWGokHBJzrLB3mj"); } +pub mod enforce_fixed_fec_set { + solana_pubkey::declare_id!("fixfecLZYMfkGzwq6NJA11Yw6KYztzXiK9QcL3K78in"); +} + +pub mod provide_instruction_data_offset_in_vm_r2 { + solana_pubkey::declare_id!("5xXZc66h4UdB6Yq7FzdBxBiRAFMMScMLwHxk2QZDaNZL"); +} + pub static FEATURE_NAMES: LazyLock> = LazyLock::new(|| { [ (secp256k1_program_enabled::id(), "secp256k1 program"), - (deprecate_rewards_sysvar::id(), "deprecate unused rewards sysvar"), + ( + deprecate_rewards_sysvar::id(), + "deprecate unused rewards sysvar", + ), (pico_inflation::id(), "pico inflation"), - (full_inflation::devnet_and_testnet::id(), "full inflation on devnet and testnet"), + ( + full_inflation::devnet_and_testnet::id(), + "full inflation on devnet and testnet", + ), (spl_token_v2_multisig_fix::id(), "spl-token multisig fix"), - (no_overflow_rent_distribution::id(), "no overflow rent distribution"), - (filter_stake_delegation_accounts::id(), "filter stake_delegation_accounts #14062"), - 
(require_custodian_for_locked_stake_authorize::id(), "require custodian to authorize withdrawer change for locked stake"), - (spl_token_v2_self_transfer_fix::id(), "spl-token self-transfer fix"), - (full_inflation::mainnet::certusone::enable::id(), "full inflation enabled by Certus One"), - (full_inflation::mainnet::certusone::vote::id(), "community vote allowing Certus One to enable full inflation"), - (warp_timestamp_again::id(), "warp timestamp again, adjust bounding to 25% fast 80% slow #15204"), + ( + no_overflow_rent_distribution::id(), + "no overflow rent distribution", + ), + ( + filter_stake_delegation_accounts::id(), + "filter stake_delegation_accounts #14062", + ), + ( + require_custodian_for_locked_stake_authorize::id(), + "require custodian to authorize withdrawer change for locked stake", + ), + ( + spl_token_v2_self_transfer_fix::id(), + "spl-token self-transfer fix", + ), + ( + full_inflation::mainnet::certusone::enable::id(), + "full inflation enabled by Certus One", + ), + ( + full_inflation::mainnet::certusone::vote::id(), + "community vote allowing Certus One to enable full inflation", + ), + ( + warp_timestamp_again::id(), + "warp timestamp again, adjust bounding to 25% fast 80% slow #15204", + ), (check_init_vote_data::id(), "check initialized Vote data"), - (secp256k1_recover_syscall_enabled::id(), "secp256k1_recover syscall"), - (system_transfer_zero_check::id(), "perform all checks for transfers of 0 lamports"), + ( + secp256k1_recover_syscall_enabled::id(), + "secp256k1_recover syscall", + ), + ( + system_transfer_zero_check::id(), + "perform all checks for transfers of 0 lamports", + ), (blake3_syscall_enabled::id(), "blake3 syscall"), - (dedupe_config_program_signers::id(), "dedupe config program signers"), - (verify_tx_signatures_len::id(), "prohibit extra transaction signatures"), - (vote_stake_checked_instructions::id(), "vote/state program checked instructions #18345"), - (rent_for_sysvars::id(), "collect rent from accounts owned by 
sysvars"), - (libsecp256k1_0_5_upgrade_enabled::id(), "upgrade libsecp256k1 to v0.5.0"), + ( + dedupe_config_program_signers::id(), + "dedupe config program signers", + ), + ( + verify_tx_signatures_len::id(), + "prohibit extra transaction signatures", + ), + ( + vote_stake_checked_instructions::id(), + "vote/state program checked instructions #18345", + ), + ( + rent_for_sysvars::id(), + "collect rent from accounts owned by sysvars", + ), + ( + libsecp256k1_0_5_upgrade_enabled::id(), + "upgrade libsecp256k1 to v0.5.0", + ), (tx_wide_compute_cap::id(), "transaction wide compute cap"), - (spl_token_v2_set_authority_fix::id(), "spl-token set_authority fix"), - (merge_nonce_error_into_system_error::id(), "merge NonceError into SystemError"), + ( + spl_token_v2_set_authority_fix::id(), + "spl-token set_authority fix", + ), + ( + merge_nonce_error_into_system_error::id(), + "merge NonceError into SystemError", + ), (disable_fees_sysvar::id(), "disable fees sysvar"), - (stake_merge_with_unmatched_credits_observed::id(), "allow merging active stakes with unmatched credits_observed #18985"), - (zk_token_sdk_enabled::id(), "enable Zk Token proof program and syscalls"), - (curve25519_syscall_enabled::id(), "enable curve25519 syscalls"), - (versioned_tx_message_enabled::id(), "enable versioned transaction message processing"), - (libsecp256k1_fail_on_bad_count::id(), "fail libsecp256k1_verify if count appears wrong"), - (libsecp256k1_fail_on_bad_count2::id(), "fail libsecp256k1_verify if count appears wrong"), - (instructions_sysvar_owned_by_sysvar::id(), "fix owner for instructions sysvar"), - (stake_program_advance_activating_credits_observed::id(), "Enable advancing credits observed for activation epoch #19309"), - (credits_auto_rewind::id(), "Auto rewind stake's credits_observed if (accidental) vote recreation is detected #22546"), - (demote_program_write_locks::id(), "demote program write locks to readonly, except when upgradeable loader present #19593 #20265"), - 
(ed25519_program_enabled::id(), "enable builtin ed25519 signature verify program"), - (return_data_syscall_enabled::id(), "enable sol_{set,get}_return_data syscall"), - (reduce_required_deploy_balance::id(), "reduce required payer balance for program deploys"), - (sol_log_data_syscall_enabled::id(), "enable sol_log_data syscall"), - (stakes_remove_delegation_if_inactive::id(), "remove delegations from stakes cache when inactive"), - (do_support_realloc::id(), "support account data reallocation"), - (prevent_calling_precompiles_as_programs::id(), "prevent calling precompiles as programs"), - (optimize_epoch_boundary_updates::id(), "optimize epoch boundary updates"), - (remove_native_loader::id(), "remove support for the native loader"), - (send_to_tpu_vote_port::id(), "send votes to the tpu vote port"), + ( + stake_merge_with_unmatched_credits_observed::id(), + "allow merging active stakes with unmatched credits_observed #18985", + ), + ( + zk_token_sdk_enabled::id(), + "enable Zk Token proof program and syscalls", + ), + ( + curve25519_syscall_enabled::id(), + "enable curve25519 syscalls", + ), + ( + versioned_tx_message_enabled::id(), + "enable versioned transaction message processing", + ), + ( + libsecp256k1_fail_on_bad_count::id(), + "fail libsecp256k1_verify if count appears wrong", + ), + ( + libsecp256k1_fail_on_bad_count2::id(), + "fail libsecp256k1_verify if count appears wrong", + ), + ( + instructions_sysvar_owned_by_sysvar::id(), + "fix owner for instructions sysvar", + ), + ( + stake_program_advance_activating_credits_observed::id(), + "Enable advancing credits observed for activation epoch #19309", + ), + ( + credits_auto_rewind::id(), + "Auto rewind stake's credits_observed if (accidental) vote recreation is detected \ + #22546", + ), + ( + demote_program_write_locks::id(), + "demote program write locks to readonly, except when upgradeable loader present \ + #19593 #20265", + ), + ( + ed25519_program_enabled::id(), + "enable builtin ed25519 signature 
verify program", + ), + ( + return_data_syscall_enabled::id(), + "enable sol_{set,get}_return_data syscall", + ), + ( + reduce_required_deploy_balance::id(), + "reduce required payer balance for program deploys", + ), + ( + sol_log_data_syscall_enabled::id(), + "enable sol_log_data syscall", + ), + ( + stakes_remove_delegation_if_inactive::id(), + "remove delegations from stakes cache when inactive", + ), + ( + do_support_realloc::id(), + "support account data reallocation", + ), + ( + prevent_calling_precompiles_as_programs::id(), + "prevent calling precompiles as programs", + ), + ( + optimize_epoch_boundary_updates::id(), + "optimize epoch boundary updates", + ), + ( + remove_native_loader::id(), + "remove support for the native loader", + ), + ( + send_to_tpu_vote_port::id(), + "send votes to the tpu vote port", + ), (requestable_heap_size::id(), "Requestable heap frame size"), (disable_fee_calculator::id(), "deprecate fee calculator"), - (add_compute_budget_program::id(), "Add compute_budget_program"), + ( + add_compute_budget_program::id(), + "Add compute_budget_program", + ), (nonce_must_be_writable::id(), "nonce must be writable"), (spl_token_v3_3_0_release::id(), "spl-token v3.3.0 release"), (leave_nonce_on_success::id(), "leave nonce as is on success"), - (reject_empty_instruction_without_program::id(), "fail instructions which have native_loader as program_id directly"), - (fixed_memcpy_nonoverlapping_check::id(), "use correct check for nonoverlapping regions in memcpy syscall"), - (reject_non_rent_exempt_vote_withdraws::id(), "fail vote withdraw instructions which leave the account non-rent-exempt"), - (evict_invalid_stakes_cache_entries::id(), "evict invalid stakes cache entries on epoch boundaries"), - (allow_votes_to_directly_update_vote_state::id(), "enable direct vote state update"), - (max_tx_account_locks::id(), "enforce max number of locked accounts per transaction"), - (require_rent_exempt_accounts::id(), "require all new transaction accounts 
with data to be rent-exempt"), - (filter_votes_outside_slot_hashes::id(), "filter vote slots older than the slot hashes history"), + ( + reject_empty_instruction_without_program::id(), + "fail instructions which have native_loader as program_id directly", + ), + ( + fixed_memcpy_nonoverlapping_check::id(), + "use correct check for nonoverlapping regions in memcpy syscall", + ), + ( + reject_non_rent_exempt_vote_withdraws::id(), + "fail vote withdraw instructions which leave the account non-rent-exempt", + ), + ( + evict_invalid_stakes_cache_entries::id(), + "evict invalid stakes cache entries on epoch boundaries", + ), + ( + allow_votes_to_directly_update_vote_state::id(), + "enable direct vote state update", + ), + ( + max_tx_account_locks::id(), + "enforce max number of locked accounts per transaction", + ), + ( + require_rent_exempt_accounts::id(), + "require all new transaction accounts with data to be rent-exempt", + ), + ( + filter_votes_outside_slot_hashes::id(), + "filter vote slots older than the slot hashes history", + ), (update_syscall_base_costs::id(), "update syscall base costs"), - (stake_deactivate_delinquent_instruction::id(), "enable the deactivate delinquent stake instruction #23932"), - (vote_withdraw_authority_may_change_authorized_voter::id(), "vote account withdraw authority may change the authorized voter #22521"), - (spl_associated_token_account_v1_0_4::id(), "SPL Associated Token Account Program release version 1.0.4, tied to token 3.3.0 #22648"), - (reject_vote_account_close_unless_zero_credit_epoch::id(), "fail vote account withdraw to 0 unless account earned 0 credits in last completed epoch"), - (add_get_processed_sibling_instruction_syscall::id(), "add add_get_processed_sibling_instruction_syscall"), - (bank_transaction_count_fix::id(), "fixes Bank::transaction_count to include all committed transactions, not just successful ones"), - (disable_bpf_deprecated_load_instructions::id(), "disable ldabs* and ldind* SBF instructions"), - 
(disable_bpf_unresolved_symbols_at_runtime::id(), "disable reporting of unresolved SBF symbols at runtime"), - (record_instruction_in_transaction_context_push::id(), "move the CPI stack overflow check to the end of push"), + ( + stake_deactivate_delinquent_instruction::id(), + "enable the deactivate delinquent stake instruction #23932", + ), + ( + vote_withdraw_authority_may_change_authorized_voter::id(), + "vote account withdraw authority may change the authorized voter #22521", + ), + ( + spl_associated_token_account_v1_0_4::id(), + "SPL Associated Token Account Program release version 1.0.4, tied to token 3.3.0 \ + #22648", + ), + ( + reject_vote_account_close_unless_zero_credit_epoch::id(), + "fail vote account withdraw to 0 unless account earned 0 credits in last completed \ + epoch", + ), + ( + add_get_processed_sibling_instruction_syscall::id(), + "add add_get_processed_sibling_instruction_syscall", + ), + ( + bank_transaction_count_fix::id(), + "fixes Bank::transaction_count to include all committed transactions, not just \ + successful ones", + ), + ( + disable_bpf_deprecated_load_instructions::id(), + "disable ldabs* and ldind* SBF instructions", + ), + ( + disable_bpf_unresolved_symbols_at_runtime::id(), + "disable reporting of unresolved SBF symbols at runtime", + ), + ( + record_instruction_in_transaction_context_push::id(), + "move the CPI stack overflow check to the end of push", + ), (syscall_saturated_math::id(), "syscalls use saturated math"), - (check_physical_overlapping::id(), "check physical overlapping regions"), - (limit_secp256k1_recovery_id::id(), "limit secp256k1 recovery id"), - (disable_deprecated_loader::id(), "disable the deprecated BPF loader"), - (check_slice_translation_size::id(), "check size when translating slices"), - (stake_split_uses_rent_sysvar::id(), "stake split instruction uses rent sysvar"), - (add_get_minimum_delegation_instruction_to_stake_program::id(), "add GetMinimumDelegation instruction to stake program"), - 
(error_on_syscall_bpf_function_hash_collisions::id(), "error on bpf function hash collisions"), + ( + check_physical_overlapping::id(), + "check physical overlapping regions", + ), + ( + limit_secp256k1_recovery_id::id(), + "limit secp256k1 recovery id", + ), + ( + disable_deprecated_loader::id(), + "disable the deprecated BPF loader", + ), + ( + check_slice_translation_size::id(), + "check size when translating slices", + ), + ( + stake_split_uses_rent_sysvar::id(), + "stake split instruction uses rent sysvar", + ), + ( + add_get_minimum_delegation_instruction_to_stake_program::id(), + "add GetMinimumDelegation instruction to stake program", + ), + ( + error_on_syscall_bpf_function_hash_collisions::id(), + "error on bpf function hash collisions", + ), (reject_callx_r10::id(), "Reject bpf callx r10 instructions"), - (drop_redundant_turbine_path::id(), "drop redundant turbine path"), - (executables_incur_cpi_data_cost::id(), "Executables incur CPI data costs"), - (fix_recent_blockhashes::id(), "stop adding hashes for skipped slots to recent blockhashes"), - (update_rewards_from_cached_accounts::id(), "update rewards from cached accounts"), - (spl_token_v3_4_0::id(), "SPL Token Program version 3.4.0 release #24740"), - (spl_associated_token_account_v1_1_0::id(), "SPL Associated Token Account Program version 1.1.0 release #24741"), - (default_units_per_instruction::id(), "Default max tx-wide compute units calculated per instruction"), - (stake_allow_zero_undelegated_amount::id(), "Allow zero-lamport undelegated amount for initialized stakes #24670"), - (require_static_program_ids_in_transaction::id(), "require static program ids in versioned transactions"), - (stake_raise_minimum_delegation_to_1_sol::id(), "Raise minimum stake delegation to 1.0 SOL #24357"), - (stake_minimum_delegation_for_rewards::id(), "stakes must be at least the minimum delegation to earn rewards"), - (add_set_compute_unit_price_ix::id(), "add compute budget ix for setting a compute unit price"), 
- (disable_deploy_of_alloc_free_syscall::id(), "disable new deployments of deprecated sol_alloc_free_ syscall"), - (include_account_index_in_rent_error::id(), "include account index in rent tx error #25190"), - (add_shred_type_to_shred_seed::id(), "add shred-type to shred seed #25556"), - (warp_timestamp_with_a_vengeance::id(), "warp timestamp again, adjust bounding to 150% slow #25666"), - (separate_nonce_from_blockhash::id(), "separate durable nonce and blockhash domains #25744"), + ( + drop_redundant_turbine_path::id(), + "drop redundant turbine path", + ), + ( + executables_incur_cpi_data_cost::id(), + "Executables incur CPI data costs", + ), + ( + fix_recent_blockhashes::id(), + "stop adding hashes for skipped slots to recent blockhashes", + ), + ( + update_rewards_from_cached_accounts::id(), + "update rewards from cached accounts", + ), + ( + spl_token_v3_4_0::id(), + "SPL Token Program version 3.4.0 release #24740", + ), + ( + spl_associated_token_account_v1_1_0::id(), + "SPL Associated Token Account Program version 1.1.0 release #24741", + ), + ( + default_units_per_instruction::id(), + "Default max tx-wide compute units calculated per instruction", + ), + ( + stake_allow_zero_undelegated_amount::id(), + "Allow zero-lamport undelegated amount for initialized stakes #24670", + ), + ( + require_static_program_ids_in_transaction::id(), + "require static program ids in versioned transactions", + ), + ( + stake_raise_minimum_delegation_to_1_sol::id(), + "Raise minimum stake delegation to 1.0 SOL #24357", + ), + ( + stake_minimum_delegation_for_rewards::id(), + "stakes must be at least the minimum delegation to earn rewards", + ), + ( + add_set_compute_unit_price_ix::id(), + "add compute budget ix for setting a compute unit price", + ), + ( + disable_deploy_of_alloc_free_syscall::id(), + "disable new deployments of deprecated sol_alloc_free_ syscall", + ), + ( + include_account_index_in_rent_error::id(), + "include account index in rent tx error #25190", + ), + ( 
+ add_shred_type_to_shred_seed::id(), + "add shred-type to shred seed #25556", + ), + ( + warp_timestamp_with_a_vengeance::id(), + "warp timestamp again, adjust bounding to 150% slow #25666", + ), + ( + separate_nonce_from_blockhash::id(), + "separate durable nonce and blockhash domains #25744", + ), (enable_durable_nonce::id(), "enable durable nonce #25744"), - (vote_state_update_credit_per_dequeue::id(), "Calculate vote credits for VoteStateUpdate per vote dequeue to match credit awards for Vote instruction"), + ( + vote_state_update_credit_per_dequeue::id(), + "Calculate vote credits for VoteStateUpdate per vote dequeue to match credit awards \ + for Vote instruction", + ), (quick_bail_on_panic::id(), "quick bail on panic"), (nonce_must_be_authorized::id(), "nonce must be authorized"), - (nonce_must_be_advanceable::id(), "durable nonces must be advanceable"), - (vote_authorize_with_seed::id(), "An instruction you can use to change a vote accounts authority when the current authority is a derived key #25860"), - (preserve_rent_epoch_for_rent_exempt_accounts::id(), "preserve rent epoch for rent exempt accounts #26479"), - (enable_bpf_loader_extend_program_ix::id(), "enable bpf upgradeable loader ExtendProgram instruction #25234"), - (skip_rent_rewrites::id(), "skip rewriting rent exempt accounts during rent collection #26491"), - (enable_early_verification_of_account_modifications::id(), "enable early verification of account modifications #25899"), - (disable_rehash_for_rent_epoch::id(), "on accounts hash calculation, do not try to rehash accounts #28934"), - (account_hash_ignore_slot::id(), "ignore slot when calculating an account hash #28420"), - (set_exempt_rent_epoch_max::id(), "set rent epoch to Epoch::MAX for rent-exempt accounts #28683"), - (on_load_preserve_rent_epoch_for_rent_exempt_accounts::id(), "on bank load account, do not try to fix up rent_epoch #28541"), - (prevent_crediting_accounts_that_end_rent_paying::id(), "prevent crediting rent paying 
accounts #26606"), - (cap_bpf_program_instruction_accounts::id(), "enforce max number of accounts per bpf program instruction #26628"), - (loosen_cpi_size_restriction::id(), "loosen cpi size restrictions #26641"), - (use_default_units_in_fee_calculation::id(), "use default units per instruction in fee calculation #26785"), - (compact_vote_state_updates::id(), "Compact vote state updates to lower block size"), - (incremental_snapshot_only_incremental_hash_calculation::id(), "only hash accounts in incremental snapshot during incremental snapshot creation #26799"), - (disable_cpi_setting_executable_and_rent_epoch::id(), "disable setting is_executable and_rent_epoch in CPI #26987"), - (relax_authority_signer_check_for_lookup_table_creation::id(), "relax authority signer check for lookup table creation #27205"), - (stop_sibling_instruction_search_at_parent::id(), "stop the search in get_processed_sibling_instruction when the parent instruction is reached #27289"), - (vote_state_update_root_fix::id(), "fix root in vote state updates #27361"), - (cap_accounts_data_allocations_per_transaction::id(), "cap accounts data allocations per transaction #27375"), - (epoch_accounts_hash::id(), "enable epoch accounts hash calculation #27539"), - (remove_deprecated_request_unit_ix::id(), "remove support for RequestUnitsDeprecated instruction #27500"), - (increase_tx_account_lock_limit::id(), "increase tx account lock limit to 128 #27241"), - (limit_max_instruction_trace_length::id(), "limit max instruction trace length #27939"), - (check_syscall_outputs_do_not_overlap::id(), "check syscall outputs do_not overlap #28600"), - (enable_bpf_loader_set_authority_checked_ix::id(), "enable bpf upgradeable loader SetAuthorityChecked instruction #28424"), - (enable_alt_bn128_syscall::id(), "add alt_bn128 syscalls #27961"), - (simplify_alt_bn128_syscall_error_codes::id(), "SIMD-0129: simplify alt_bn128 syscall error codes"), - (enable_program_redeployment_cooldown::id(), "enable program 
redeployment cooldown #29135"), - (commission_updates_only_allowed_in_first_half_of_epoch::id(), "validator commission updates are only allowed in the first half of an epoch #29362"), - (enable_turbine_fanout_experiments::id(), "enable turbine fanout experiments #29393"), - (disable_turbine_fanout_experiments::id(), "disable turbine fanout experiments #29393"), - (move_serialized_len_ptr_in_cpi::id(), "cpi ignore serialized_len_ptr #29592"), - (update_hashes_per_tick::id(), "Update desired hashes per tick on epoch boundary"), - (enable_big_mod_exp_syscall::id(), "add big_mod_exp syscall #28503"), - (disable_builtin_loader_ownership_chains::id(), "disable builtin loader ownership chains #29956"), - (cap_transaction_accounts_data_size::id(), "cap transaction accounts data size up to a limit #27839"), - (remove_congestion_multiplier_from_fee_calculation::id(), "Remove congestion multiplier from transaction fee calculation #29881"), - (enable_request_heap_frame_ix::id(), "Enable transaction to request heap frame using compute budget instruction #30076"), - (prevent_rent_paying_rent_recipients::id(), "prevent recipients of rent rewards from ending in rent-paying state #30151"), - (delay_visibility_of_program_deployment::id(), "delay visibility of program upgrades #30085"), - (apply_cost_tracker_during_replay::id(), "apply cost tracker to blocks during replay #29595"), - (add_set_tx_loaded_accounts_data_size_instruction::id(), "add compute budget instruction for setting account data size per transaction #30366"), - (switch_to_new_elf_parser::id(), "switch to new ELF parser #30497"), - (round_up_heap_size::id(), "round up heap size when calculating heap cost #30679"), - (remove_bpf_loader_incorrect_program_id::id(), "stop incorrectly throwing IncorrectProgramId in bpf_loader #30747"), - (include_loaded_accounts_data_size_in_fee_calculation::id(), "include transaction loaded accounts data size in base fee calculation #30657"), - (native_programs_consume_cu::id(), "Native 
program should consume compute units #30620"), - (simplify_writable_program_account_check::id(), "Simplify checks performed for writable upgradeable program accounts #30559"), - (stop_truncating_strings_in_syscalls::id(), "Stop truncating strings in syscalls #31029"), - (clean_up_delegation_errors::id(), "Return InsufficientDelegation instead of InsufficientFunds or InsufficientStake where applicable #31206"), - (vote_state_add_vote_latency::id(), "replace Lockout with LandedVote (including vote latency) in vote state #31264"), - (checked_arithmetic_in_fee_validation::id(), "checked arithmetic in fee validation #31273"), - (stricter_abi_and_runtime_constraints::id(), "use memory regions to map account data into the rbpf vm instead of copying the data"), - (last_restart_slot_sysvar::id(), "enable new sysvar last_restart_slot"), - (reduce_stake_warmup_cooldown::id(), "reduce stake warmup cooldown from 25% to 9%"), - (revise_turbine_epoch_stakes::id(), "revise turbine epoch stakes"), + ( + nonce_must_be_advanceable::id(), + "durable nonces must be advanceable", + ), + ( + vote_authorize_with_seed::id(), + "An instruction you can use to change a vote accounts authority when the current \ + authority is a derived key #25860", + ), + ( + preserve_rent_epoch_for_rent_exempt_accounts::id(), + "preserve rent epoch for rent exempt accounts #26479", + ), + ( + enable_bpf_loader_extend_program_ix::id(), + "enable bpf upgradeable loader ExtendProgram instruction #25234", + ), + ( + skip_rent_rewrites::id(), + "skip rewriting rent exempt accounts during rent collection #26491", + ), + ( + enable_early_verification_of_account_modifications::id(), + "enable early verification of account modifications #25899", + ), + ( + disable_rehash_for_rent_epoch::id(), + "on accounts hash calculation, do not try to rehash accounts #28934", + ), + ( + account_hash_ignore_slot::id(), + "ignore slot when calculating an account hash #28420", + ), + ( + set_exempt_rent_epoch_max::id(), + "set rent 
epoch to Epoch::MAX for rent-exempt accounts #28683", + ), + ( + on_load_preserve_rent_epoch_for_rent_exempt_accounts::id(), + "on bank load account, do not try to fix up rent_epoch #28541", + ), + ( + prevent_crediting_accounts_that_end_rent_paying::id(), + "prevent crediting rent paying accounts #26606", + ), + ( + cap_bpf_program_instruction_accounts::id(), + "enforce max number of accounts per bpf program instruction #26628", + ), + ( + loosen_cpi_size_restriction::id(), + "loosen cpi size restrictions #26641", + ), + ( + use_default_units_in_fee_calculation::id(), + "use default units per instruction in fee calculation #26785", + ), + ( + compact_vote_state_updates::id(), + "Compact vote state updates to lower block size", + ), + ( + incremental_snapshot_only_incremental_hash_calculation::id(), + "only hash accounts in incremental snapshot during incremental snapshot creation \ + #26799", + ), + ( + disable_cpi_setting_executable_and_rent_epoch::id(), + "disable setting is_executable and_rent_epoch in CPI #26987", + ), + ( + relax_authority_signer_check_for_lookup_table_creation::id(), + "relax authority signer check for lookup table creation #27205", + ), + ( + stop_sibling_instruction_search_at_parent::id(), + "stop the search in get_processed_sibling_instruction when the parent instruction is \ + reached #27289", + ), + ( + vote_state_update_root_fix::id(), + "fix root in vote state updates #27361", + ), + ( + cap_accounts_data_allocations_per_transaction::id(), + "cap accounts data allocations per transaction #27375", + ), + ( + epoch_accounts_hash::id(), + "enable epoch accounts hash calculation #27539", + ), + ( + remove_deprecated_request_unit_ix::id(), + "remove support for RequestUnitsDeprecated instruction #27500", + ), + ( + increase_tx_account_lock_limit::id(), + "increase tx account lock limit to 128 #27241", + ), + ( + limit_max_instruction_trace_length::id(), + "limit max instruction trace length #27939", + ), + ( + 
check_syscall_outputs_do_not_overlap::id(), + "check syscall outputs do_not overlap #28600", + ), + ( + enable_bpf_loader_set_authority_checked_ix::id(), + "enable bpf upgradeable loader SetAuthorityChecked instruction #28424", + ), + ( + enable_alt_bn128_syscall::id(), + "add alt_bn128 syscalls #27961", + ), + ( + simplify_alt_bn128_syscall_error_codes::id(), + "SIMD-0129: simplify alt_bn128 syscall error codes", + ), + ( + enable_program_redeployment_cooldown::id(), + "enable program redeployment cooldown #29135", + ), + ( + commission_updates_only_allowed_in_first_half_of_epoch::id(), + "validator commission updates are only allowed in the first half of an epoch #29362", + ), + ( + enable_turbine_fanout_experiments::id(), + "enable turbine fanout experiments #29393", + ), + ( + disable_turbine_fanout_experiments::id(), + "disable turbine fanout experiments #29393", + ), + ( + move_serialized_len_ptr_in_cpi::id(), + "cpi ignore serialized_len_ptr #29592", + ), + ( + update_hashes_per_tick::id(), + "Update desired hashes per tick on epoch boundary", + ), + ( + enable_big_mod_exp_syscall::id(), + "add big_mod_exp syscall #28503", + ), + ( + disable_builtin_loader_ownership_chains::id(), + "disable builtin loader ownership chains #29956", + ), + ( + cap_transaction_accounts_data_size::id(), + "cap transaction accounts data size up to a limit #27839", + ), + ( + remove_congestion_multiplier_from_fee_calculation::id(), + "Remove congestion multiplier from transaction fee calculation #29881", + ), + ( + enable_request_heap_frame_ix::id(), + "Enable transaction to request heap frame using compute budget instruction #30076", + ), + ( + prevent_rent_paying_rent_recipients::id(), + "prevent recipients of rent rewards from ending in rent-paying state #30151", + ), + ( + delay_visibility_of_program_deployment::id(), + "delay visibility of program upgrades #30085", + ), + ( + apply_cost_tracker_during_replay::id(), + "apply cost tracker to blocks during replay #29595", + ), + 
( + add_set_tx_loaded_accounts_data_size_instruction::id(), + "add compute budget instruction for setting account data size per transaction #30366", + ), + ( + switch_to_new_elf_parser::id(), + "switch to new ELF parser #30497", + ), + ( + round_up_heap_size::id(), + "round up heap size when calculating heap cost #30679", + ), + ( + remove_bpf_loader_incorrect_program_id::id(), + "stop incorrectly throwing IncorrectProgramId in bpf_loader #30747", + ), + ( + include_loaded_accounts_data_size_in_fee_calculation::id(), + "include transaction loaded accounts data size in base fee calculation #30657", + ), + ( + native_programs_consume_cu::id(), + "Native program should consume compute units #30620", + ), + ( + simplify_writable_program_account_check::id(), + "Simplify checks performed for writable upgradeable program accounts #30559", + ), + ( + stop_truncating_strings_in_syscalls::id(), + "Stop truncating strings in syscalls #31029", + ), + ( + clean_up_delegation_errors::id(), + "Return InsufficientDelegation instead of InsufficientFunds or InsufficientStake \ + where applicable #31206", + ), + ( + vote_state_add_vote_latency::id(), + "replace Lockout with LandedVote (including vote latency) in vote state #31264", + ), + ( + checked_arithmetic_in_fee_validation::id(), + "checked arithmetic in fee validation #31273", + ), + ( + stricter_abi_and_runtime_constraints::id(), + "SIMD-0219: Stricter ABI and Runtime Constraints", + ), + ( + account_data_direct_mapping::id(), + "enable account data direct mapping", + ), + ( + last_restart_slot_sysvar::id(), + "enable new sysvar last_restart_slot", + ), + ( + reduce_stake_warmup_cooldown::id(), + "reduce stake warmup cooldown from 25% to 9%", + ), + ( + revise_turbine_epoch_stakes::id(), + "revise turbine epoch stakes", + ), (enable_poseidon_syscall::id(), "Enable Poseidon syscall"), - (timely_vote_credits::id(), "use timeliness of votes in determining credits to award"), - (remaining_compute_units_syscall_enabled::id(), 
"enable the remaining_compute_units syscall"), + ( + timely_vote_credits::id(), + "use timeliness of votes in determining credits to award", + ), + ( + remaining_compute_units_syscall_enabled::id(), + "enable the remaining_compute_units syscall", + ), (enable_loader_v4::id(), "SIMD-0167: Enable Loader-v4"), - (require_rent_exempt_split_destination::id(), "Require stake split destination account to be rent exempt"), - (better_error_codes_for_tx_lamport_check::id(), "better error codes for tx lamport check #33353"), - (enable_alt_bn128_compression_syscall::id(), "add alt_bn128 compression syscalls"), - (update_hashes_per_tick2::id(), "Update desired hashes per tick to 2.8M"), - (update_hashes_per_tick3::id(), "Update desired hashes per tick to 4.4M"), - (update_hashes_per_tick4::id(), "Update desired hashes per tick to 7.6M"), - (update_hashes_per_tick5::id(), "Update desired hashes per tick to 9.2M"), - (update_hashes_per_tick6::id(), "Update desired hashes per tick to 10M"), - (validate_fee_collector_account::id(), "validate fee collector account #33888"), - (disable_rent_fees_collection::id(), "Disable rent fees collection #33945"), - (enable_zk_transfer_with_fee::id(), "enable Zk Token proof program transfer with fee"), + ( + require_rent_exempt_split_destination::id(), + "Require stake split destination account to be rent exempt", + ), + ( + better_error_codes_for_tx_lamport_check::id(), + "better error codes for tx lamport check #33353", + ), + ( + enable_alt_bn128_compression_syscall::id(), + "add alt_bn128 compression syscalls", + ), + ( + update_hashes_per_tick2::id(), + "Update desired hashes per tick to 2.8M", + ), + ( + update_hashes_per_tick3::id(), + "Update desired hashes per tick to 4.4M", + ), + ( + update_hashes_per_tick4::id(), + "Update desired hashes per tick to 7.6M", + ), + ( + update_hashes_per_tick5::id(), + "Update desired hashes per tick to 9.2M", + ), + ( + update_hashes_per_tick6::id(), + "Update desired hashes per tick to 10M", + ), + ( 
+ validate_fee_collector_account::id(), + "validate fee collector account #33888", + ), + ( + disable_rent_fees_collection::id(), + "Disable rent fees collection #33945", + ), + ( + enable_zk_transfer_with_fee::id(), + "enable Zk Token proof program transfer with fee", + ), (drop_legacy_shreds::id(), "drops legacy shreds #34328"), - (allow_commission_decrease_at_any_time::id(), "Allow commission decrease at any time in epoch #33843"), - (consume_blockstore_duplicate_proofs::id(), "consume duplicate proofs from blockstore in consensus #34372"), - (add_new_reserved_account_keys::id(), "add new unwritable reserved accounts #34899"), - (index_erasure_conflict_duplicate_proofs::id(), "generate duplicate proofs for index and erasure conflicts #34360"), - (merkle_conflict_duplicate_proofs::id(), "generate duplicate proofs for merkle root conflicts #34270"), - (disable_bpf_loader_instructions::id(), "disable bpf loader management instructions #34194"), - (enable_zk_proof_from_account::id(), "Enable zk token proof program to read proof from accounts instead of instruction data #34750"), - (curve25519_restrict_msm_length::id(), "restrict curve25519 multiscalar multiplication vector lengths #34763"), - (cost_model_requested_write_lock_cost::id(), "cost model uses number of requested write locks #34819"), - (enable_gossip_duplicate_proof_ingestion::id(), "enable gossip duplicate proof ingestion #32963"), - (enable_chained_merkle_shreds::id(), "Enable chained Merkle shreds #34916"), - (remove_rounding_in_fee_calculation::id(), "Removing unwanted rounding in fee calculation #34982"), - (deprecate_unused_legacy_vote_plumbing::id(), "Deprecate unused legacy vote tx plumbing"), - (enable_tower_sync_ix::id(), "Enable tower sync vote instruction"), - (chained_merkle_conflict_duplicate_proofs::id(), "generate duplicate proofs for chained merkle root conflicts"), - (reward_full_priority_fee::id(), "Reward full priority fee to validators #34731"), - (abort_on_invalid_curve::id(), 
"SIMD-0137: Abort when elliptic curve syscalls invoked on invalid curve id"), - (get_sysvar_syscall_enabled::id(), "Enable syscall for fetching Sysvar bytes #615"), - (migrate_feature_gate_program_to_core_bpf::id(), "Migrate Feature Gate program to Core BPF (programify) #1003"), + ( + allow_commission_decrease_at_any_time::id(), + "Allow commission decrease at any time in epoch #33843", + ), + ( + consume_blockstore_duplicate_proofs::id(), + "consume duplicate proofs from blockstore in consensus #34372", + ), + ( + add_new_reserved_account_keys::id(), + "add new unwritable reserved accounts #34899", + ), + ( + index_erasure_conflict_duplicate_proofs::id(), + "generate duplicate proofs for index and erasure conflicts #34360", + ), + ( + merkle_conflict_duplicate_proofs::id(), + "generate duplicate proofs for merkle root conflicts #34270", + ), + ( + disable_bpf_loader_instructions::id(), + "disable bpf loader management instructions #34194", + ), + ( + enable_zk_proof_from_account::id(), + "Enable zk token proof program to read proof from accounts instead of instruction \ + data #34750", + ), + ( + curve25519_restrict_msm_length::id(), + "restrict curve25519 multiscalar multiplication vector lengths #34763", + ), + ( + cost_model_requested_write_lock_cost::id(), + "cost model uses number of requested write locks #34819", + ), + ( + enable_gossip_duplicate_proof_ingestion::id(), + "enable gossip duplicate proof ingestion #32963", + ), + ( + enable_chained_merkle_shreds::id(), + "Enable chained Merkle shreds #34916", + ), + ( + remove_rounding_in_fee_calculation::id(), + "Removing unwanted rounding in fee calculation #34982", + ), + ( + deprecate_unused_legacy_vote_plumbing::id(), + "Deprecate unused legacy vote tx plumbing", + ), + ( + enable_tower_sync_ix::id(), + "Enable tower sync vote instruction", + ), + ( + chained_merkle_conflict_duplicate_proofs::id(), + "generate duplicate proofs for chained merkle root conflicts", + ), + ( + reward_full_priority_fee::id(), 
+ "Reward full priority fee to validators #34731", + ), + ( + abort_on_invalid_curve::id(), + "SIMD-0137: Abort when elliptic curve syscalls invoked on invalid curve id", + ), + ( + get_sysvar_syscall_enabled::id(), + "Enable syscall for fetching Sysvar bytes #615", + ), + ( + migrate_feature_gate_program_to_core_bpf::id(), + "Migrate Feature Gate program to Core BPF (programify) #1003", + ), (vote_only_full_fec_sets::id(), "vote only full fec sets"), - (migrate_config_program_to_core_bpf::id(), "Migrate Config program to Core BPF #1378"), - (enable_get_epoch_stake_syscall::id(), "Enable syscall: sol_get_epoch_stake #884"), - (migrate_address_lookup_table_program_to_core_bpf::id(), "Migrate Address Lookup Table program to Core BPF #1651"), - (zk_elgamal_proof_program_enabled::id(), "SIMD-0153: Enable ZkElGamalProof program"), - (verify_retransmitter_signature::id(), "Verify retransmitter signature #1840"), - (move_stake_and_move_lamports_ixs::id(), "Enable MoveStake and MoveLamports stake program instructions #1610"), - (ed25519_precompile_verify_strict::id(), "SIMD-0152: Use strict verification in ed25519 precompile"), - (vote_only_retransmitter_signed_fec_sets::id(), "vote only on retransmitter signed fec sets"), - (move_precompile_verification_to_svm::id(), "SIMD-0159: Move precompile verification into SVM"), - (enable_transaction_loading_failure_fees::id(), "SIMD-0082: Enable fees for some additional transaction failures"), - (enable_turbine_extended_fanout_experiments::id(), "enable turbine extended fanout experiments #"), - (deprecate_legacy_vote_ixs::id(), "Deprecate legacy vote instructions"), - (partitioned_epoch_rewards_superfeature::id(), "SIMD-0118: replaces enable_partitioned_epoch_reward to enable partitioned rewards at epoch boundary"), - (disable_sbpf_v0_execution::id(), "SIMD-0161: Disables execution of SBPFv0 programs"), - (reenable_sbpf_v0_execution::id(), "Re-enables execution of SBPFv0 programs"), - 
(enable_sbpf_v1_deployment_and_execution::id(), "SIMD-0166: Enable deployment and execution of SBPFv1 programs"), - (enable_sbpf_v2_deployment_and_execution::id(), "SIMD-0173 and SIMD-0174: Enable deployment and execution of SBPFv2 programs"), - (enable_sbpf_v3_deployment_and_execution::id(), "SIMD-0178, SIMD-0179 and SIMD-0189: Enable deployment and execution of SBPFv3 programs"), - (remove_accounts_executable_flag_checks::id(), "SIMD-0162: Remove checks of accounts is_executable flag"), - (disable_account_loader_special_case::id(), "Disable account loader special case #3513"), - (accounts_lt_hash::id(), "SIMD-0215: enables lattice-based accounts hash"), - (snapshots_lt_hash::id(), "SIMD-0220: snapshots use lattice-based accounts hash"), - (remove_accounts_delta_hash::id(), "SIMD-0223: removes accounts delta hash"), - (enable_secp256r1_precompile::id(), "SIMD-0075: Enable secp256r1 precompile"), - (migrate_stake_program_to_core_bpf::id(), "SIMD-0196: Migrate Stake program to Core BPF #3655"), - (deplete_cu_meter_on_vm_failure::id(), "SIMD-0182: Deplete compute meter for vm errors #3993"), - (reserve_minimal_cus_for_builtin_instructions::id(), "SIMD-0170: Reserve minimal CUs for builtin instructions #2562"), - (raise_block_limits_to_50m::id(), "SIMD-0207: Raise block limit to 50M"), - (fix_alt_bn128_multiplication_input_length::id(), "SIMD-0222: fix alt_bn128 multiplication input length #3686"), - (drop_unchained_merkle_shreds::id(), "drops unchained Merkle shreds #2149"), - (relax_intrabatch_account_locks::id(), "SIMD-0083: Allow batched transactions to read/write and write/write the same accounts"), - (create_slashing_program::id(), "SIMD-0204: creates an enshrined slashing program"), - (disable_partitioned_rent_collection::id(), "SIMD-0175: Disable partitioned rent collection #4562"), - (enable_vote_address_leader_schedule::id(), "SIMD-0180: Enable vote address leader schedule #4573"), - (require_static_nonce_account::id(), "SIMD-0242: Static Nonce Account 
Only"), - (raise_block_limits_to_60m::id(), "SIMD-0256: Raise block limit to 60M"), - (mask_out_rent_epoch_in_vm_serialization::id(), "SIMD-0267: Sets rent_epoch to a constant in the VM"), - (enshrine_slashing_program::id(), "SIMD-0204: Slashable event verification"), - (enable_extend_program_checked::id(), "Enable ExtendProgramChecked instruction"), - (formalize_loaded_transaction_data_size::id(), "SIMD-0186: Loaded transaction data size specification"), - (alpenglow::id(), "Enable Alpenglow"), - (disable_zk_elgamal_proof_program::id(), "Disables zk-elgamal-proof program"), - (reenable_zk_elgamal_proof_program::id(), "Re-enables zk-elgamal-proof program"), - (raise_block_limits_to_100m::id(), "SIMD-0286: Raise block limit to 100M"), - (raise_account_cu_limit::id(), "SIMD-0306: Raise account CU limit to 40% max"), - (raise_cpi_nesting_limit_to_8::id(), "SIMD-0296: Raise CPI nesting limit from 4 to 8"), + ( + migrate_config_program_to_core_bpf::id(), + "Migrate Config program to Core BPF #1378", + ), + ( + enable_get_epoch_stake_syscall::id(), + "Enable syscall: sol_get_epoch_stake #884", + ), + ( + migrate_address_lookup_table_program_to_core_bpf::id(), + "Migrate Address Lookup Table program to Core BPF #1651", + ), + ( + zk_elgamal_proof_program_enabled::id(), + "SIMD-0153: Enable ZkElGamalProof program", + ), + ( + verify_retransmitter_signature::id(), + "Verify retransmitter signature #1840", + ), + ( + move_stake_and_move_lamports_ixs::id(), + "Enable MoveStake and MoveLamports stake program instructions #1610", + ), + ( + ed25519_precompile_verify_strict::id(), + "SIMD-0152: Use strict verification in ed25519 precompile", + ), + ( + vote_only_retransmitter_signed_fec_sets::id(), + "vote only on retransmitter signed fec sets", + ), + ( + move_precompile_verification_to_svm::id(), + "SIMD-0159: Move precompile verification into SVM", + ), + ( + enable_transaction_loading_failure_fees::id(), + "SIMD-0082: Enable fees for some additional transaction failures", + 
), + ( + enable_turbine_extended_fanout_experiments::id(), + "enable turbine extended fanout experiments #", + ), + ( + deprecate_legacy_vote_ixs::id(), + "Deprecate legacy vote instructions", + ), + ( + partitioned_epoch_rewards_superfeature::id(), + "SIMD-0118: replaces enable_partitioned_epoch_reward to enable partitioned rewards at \ + epoch boundary", + ), + ( + disable_sbpf_v0_execution::id(), + "SIMD-0161: Disables execution of SBPFv0 programs", + ), + ( + reenable_sbpf_v0_execution::id(), + "Re-enables execution of SBPFv0 programs", + ), + ( + enable_sbpf_v1_deployment_and_execution::id(), + "SIMD-0166: Enable deployment and execution of SBPFv1 programs", + ), + ( + enable_sbpf_v2_deployment_and_execution::id(), + "SIMD-0173 and SIMD-0174: Enable deployment and execution of SBPFv2 programs", + ), + ( + enable_sbpf_v3_deployment_and_execution::id(), + "SIMD-0178, SIMD-0179 and SIMD-0189: Enable deployment and execution of SBPFv3 \ + programs", + ), + ( + remove_accounts_executable_flag_checks::id(), + "SIMD-0162: Remove checks of accounts is_executable flag", + ), + ( + disable_account_loader_special_case::id(), + "Disable account loader special case #3513", + ), + ( + accounts_lt_hash::id(), + "SIMD-0215: enables lattice-based accounts hash", + ), + ( + snapshots_lt_hash::id(), + "SIMD-0220: snapshots use lattice-based accounts hash", + ), + ( + remove_accounts_delta_hash::id(), + "SIMD-0223: removes accounts delta hash", + ), + ( + enable_secp256r1_precompile::id(), + "SIMD-0075: Enable secp256r1 precompile", + ), + ( + migrate_stake_program_to_core_bpf::id(), + "SIMD-0196: Migrate Stake program to Core BPF #3655", + ), + ( + deplete_cu_meter_on_vm_failure::id(), + "SIMD-0182: Deplete compute meter for vm errors #3993", + ), + ( + reserve_minimal_cus_for_builtin_instructions::id(), + "SIMD-0170: Reserve minimal CUs for builtin instructions #2562", + ), + ( + raise_block_limits_to_50m::id(), + "SIMD-0207: Raise block limit to 50M", + ), + ( + 
fix_alt_bn128_multiplication_input_length::id(), + "SIMD-0222: fix alt_bn128 multiplication input length #3686", + ), + ( + drop_unchained_merkle_shreds::id(), + "drops unchained Merkle shreds #2149", + ), + ( + relax_intrabatch_account_locks::id(), + "SIMD-0083: Allow batched transactions to read/write and write/write the same accounts", + ), + ( + create_slashing_program::id(), + "SIMD-0204: creates an enshrined slashing program", + ), + ( + disable_partitioned_rent_collection::id(), + "SIMD-0175: Disable partitioned rent collection #4562", + ), + ( + enable_vote_address_leader_schedule::id(), + "SIMD-0180: Enable vote address leader schedule #4573", + ), + ( + require_static_nonce_account::id(), + "SIMD-0242: Static Nonce Account Only", + ), + ( + raise_block_limits_to_60m::id(), + "SIMD-0256: Raise block limit to 60M", + ), + ( + mask_out_rent_epoch_in_vm_serialization::id(), + "SIMD-0267: Sets rent_epoch to a constant in the VM", + ), + ( + enshrine_slashing_program::id(), + "SIMD-0204: Slashable event verification", + ), + ( + enable_extend_program_checked::id(), + "Enable ExtendProgramChecked instruction", + ), + ( + formalize_loaded_transaction_data_size::id(), + "SIMD-0186: Loaded transaction data size specification", + ), + ( + alpenglow::id(), + "SIMD-0326: Alpenglow: new consensus algorithm", + ), + ( + disable_zk_elgamal_proof_program::id(), + "Disables zk-elgamal-proof program", + ), + ( + reenable_zk_elgamal_proof_program::id(), + "Re-enables zk-elgamal-proof program", + ), + ( + raise_block_limits_to_100m::id(), + "SIMD-0286: Raise block limit to 100M", + ), + ( + raise_account_cu_limit::id(), + "SIMD-0306: Raise account CU limit to 40% max", + ), + ( + raise_cpi_nesting_limit_to_8::id(), + "SIMD-0268: Raise CPI nesting limit from 4 to 8", + ), + ( + enforce_fixed_fec_set::id(), + "SIMD-0317: Enforce 32 data + 32 coding shreds", + ), + ( + provide_instruction_data_offset_in_vm_r2::id(), + "SIMD-0321: Provide instruction data offset in VM r2", + ), 
/*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/fetch-core-bpf.sh b/fetch-core-bpf.sh index 0ac70f3665f8eb..824defb93a7aae 100755 --- a/fetch-core-bpf.sh +++ b/fetch-core-bpf.sh @@ -29,5 +29,6 @@ add_core_bpf_program_to_fetch() { add_core_bpf_program_to_fetch address-lookup-table 3.0.0 AddressLookupTab1e1111111111111111111111111 BPFLoaderUpgradeab1e11111111111111111111111 add_core_bpf_program_to_fetch config 3.0.0 Config1111111111111111111111111111111111111 BPFLoaderUpgradeab1e11111111111111111111111 add_core_bpf_program_to_fetch feature-gate 0.0.1 Feature111111111111111111111111111111111111 BPFLoaderUpgradeab1e11111111111111111111111 +add_core_bpf_program_to_fetch stake 1.0.0 Stake11111111111111111111111111111111111111 BPFLoaderUpgradeab1e11111111111111111111111 fetch_programs "$PREFIX" "${programs[@]}" diff --git a/genesis-utils/src/lib.rs b/genesis-utils/src/lib.rs index 4d4f0d8ac1f5d0..150d8602468ec2 100644 --- a/genesis-utils/src/lib.rs +++ b/genesis-utils/src/lib.rs @@ -17,7 +17,8 @@ fn check_genesis_hash( if let Some(expected_genesis_hash) = expected_genesis_hash { if expected_genesis_hash != genesis_hash { return Err(format!( - "Genesis hash mismatch: expected {expected_genesis_hash} but downloaded genesis hash is {genesis_hash}", + "Genesis hash mismatch: expected {expected_genesis_hash} but downloaded genesis \ + hash is {genesis_hash}", )); } } @@ -79,7 +80,7 @@ fn set_and_verify_expected_genesis_hash( ) -> Result<(), String> { let genesis_hash = genesis_config.hash(); if expected_genesis_hash.is_none() { - info!("Expected genesis hash set to {}", genesis_hash); + info!("Expected genesis hash set to {genesis_hash}"); *expected_genesis_hash = Some(genesis_hash); } let expected_genesis_hash = expected_genesis_hash.unwrap(); @@ -92,7 +93,8 @@ fn set_and_verify_expected_genesis_hash( if expected_genesis_hash != rpc_genesis_hash { return Err(format!( - "Genesis hash mismatch: expected {expected_genesis_hash} but RPC node genesis 
hash is {rpc_genesis_hash}" + "Genesis hash mismatch: expected {expected_genesis_hash} but RPC node genesis hash is \ + {rpc_genesis_hash}" )); } diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index d125c7c3af3453..14a74843ec9620 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "solana-genesis" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-genesis" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -28,34 +28,35 @@ itertools = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } -solana-account = "=2.2.1" +solana-account = "=3.0.0" solana-accounts-db = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } -solana-clock = "=2.2.2" -solana-commitment-config = "=2.2.1" +solana-clock = "=3.0.0" +solana-cluster-type = "=3.0.0" +solana-commitment-config = "=3.0.0" solana-entry = { workspace = true } -solana-epoch-schedule = "=2.2.1" -solana-feature-gate-interface = "=2.2.2" -solana-fee-calculator = "=2.2.1" -solana-genesis-config = "=2.3.0" -solana-inflation = "=2.2.1" -solana-keypair = "=2.2.1" +solana-epoch-schedule = "=3.0.0" +solana-feature-gate-interface = "=3.0.0" +solana-fee-calculator = "=3.0.0" +solana-genesis-config = "=3.0.0" +solana-inflation = "=3.0.0" +solana-keypair = "=3.0.1" solana-ledger = { workspace = true } -solana-loader-v3-interface = "5.0.0" -solana-logger = "=2.3.1" -solana-native-token = "=2.2.2" -solana-poh-config = "=2.2.1" -solana-pubkey = { version = "=2.4.0", default-features = false } -solana-rent = "=2.2.1" +solana-loader-v3-interface = "6.1.0" +solana-logger = "=3.0.0" +solana-native-token = "=3.0.0" +solana-poh-config = "=3.0.0" +solana-pubkey = { version = "=3.0.0", default-features = false } 
+solana-rent = "=3.0.0" solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } -solana-sdk-ids = "=2.2.1" -solana-signer = "=2.2.1" -solana-stake-interface = "=1.2.1" +solana-sdk-ids = "=3.0.0" +solana-signer = "=3.0.0" +solana-stake-interface = { version = "=2.0.1", features = ["borsh"] } solana-stake-program = { workspace = true } -solana-time-utils = "2.2.1" +solana-time-utils = "3.0.0" solana-version = { workspace = true } solana-vote-program = { workspace = true } tempfile = { workspace = true } diff --git a/genesis/README.md b/genesis/README.md index 66bab5e36671c3..bd6e0a777f0ce2 100644 --- a/genesis/README.md +++ b/genesis/README.md @@ -7,7 +7,7 @@ --bootstrap-validator-lamports --bootstrap-validator-stake-lamports ``` -Note: you can pass in `--bootstrap-validator ...` multiple times but the lamports associated with `--bootstrap-validator-lamports` and `--bootstrap-validator-stake-lamports` will apply to all `--bootstrap-validator` arguments. +Note: you can pass in `--bootstrap-validator ...` multiple times, but the lamports associated with `--bootstrap-validator-lamports` and `--bootstrap-validator-stake-lamports` will apply to all `--bootstrap-validator` arguments. For example: ``` cargo run --bin solana-genesis -- @@ -16,7 +16,7 @@ cargo run --bin solana-genesis -- ... 
--bootstrap-validator --bootstrap-validator-stake-lamports 10000000000 - --bootstrap-validator 100000000000 + --bootstrap-validator-lamports 100000000000 ``` All validator accounts will receive the same number of stake and account lamports @@ -73,4 +73,4 @@ validator_accounts: identity_account: vote_account: stake_account: -``` \ No newline at end of file +``` diff --git a/genesis/src/genesis_accounts.rs b/genesis/src/genesis_accounts.rs index db990a06754a4b..2fe4ffe9665fbd 100644 --- a/genesis/src/genesis_accounts.rs +++ b/genesis/src/genesis_accounts.rs @@ -3,7 +3,8 @@ use { stakes::{create_and_add_stakes, StakerInfo}, unlocks::UnlockInfo, }, - solana_genesis_config::{ClusterType, GenesisConfig}, + solana_cluster_type::ClusterType, + solana_genesis_config::GenesisConfig, solana_native_token::LAMPORTS_PER_SOL, }; @@ -228,7 +229,7 @@ fn add_stakes( .sum::() } -/// Add acounts that should be present in genesis; skip for development clusters +/// Add accounts that should be present in genesis; skip for development clusters pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig, mut issued_lamports: u64) { if genesis_config.cluster_type == ClusterType::Development { return; diff --git a/genesis/src/main.rs b/genesis/src/main.rs index fc891f4b71b82a..963a962b46bcfb 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -18,6 +18,7 @@ use { }, }, solana_clock as clock, + solana_cluster_type::ClusterType, solana_commitment_config::CommitmentConfig, solana_entry::poh::compute_hashes_per_tick, solana_epoch_schedule::EpochSchedule, @@ -27,12 +28,12 @@ use { genesis_accounts::add_genesis_accounts, Base64Account, StakedValidatorAccountInfo, ValidatorAccountsFile, }, - solana_genesis_config::{ClusterType, GenesisConfig}, + solana_genesis_config::GenesisConfig, solana_inflation::Inflation, solana_keypair::{read_keypair_file, Keypair}, solana_ledger::{blockstore::create_new_ledger, blockstore_options::LedgerColumnOptions}, 
solana_loader_v3_interface::state::UpgradeableLoaderState, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_poh_config::PohConfig, solana_pubkey::Pubkey, solana_rent::Rent, @@ -65,7 +66,7 @@ fn pubkey_from_str(key_str: &str) -> Result> { Pubkey::from_str(key_str).or_else(|_| { let bytes: Vec = serde_json::from_str(key_str)?; let keypair = - Keypair::from_bytes(&bytes).map_err(|e| std::io::Error::other(e.to_string()))?; + Keypair::try_from(bytes.as_ref()).map_err(|e| std::io::Error::other(e.to_string()))?; Ok(keypair.pubkey()) }) } @@ -253,7 +254,7 @@ fn add_validator_accounts( identity_pubkey, identity_pubkey, commission, - VoteStateV3::get_rent_exempt_reserve(rent).max(1), + rent.minimum_balance(VoteStateV3::size_of()).max(1), ); genesis_config.add_account( @@ -273,11 +274,10 @@ fn add_validator_accounts( fn rent_exempt_check(stake_lamports: u64, exempt: u64) -> io::Result<()> { if stake_lamports < exempt { - Err(io::Error::other( - format!( - "error: insufficient validator stake lamports: {stake_lamports} for rent exemption, requires {exempt}" - ), - )) + Err(io::Error::other(format!( + "error: insufficient validator stake lamports: {stake_lamports} for rent exemption, \ + requires {exempt}" + ))) } else { Ok(()) } @@ -313,11 +313,11 @@ fn main() -> Result<(), Box> { }; // vote account - let default_bootstrap_validator_lamports = &sol_to_lamports(500.0) - .max(VoteStateV3::get_rent_exempt_reserve(&rent)) + let default_bootstrap_validator_lamports = &(500 * LAMPORTS_PER_SOL) + .max(rent.minimum_balance(VoteStateV3::size_of())) .to_string(); // stake account - let default_bootstrap_validator_stake_lamports = &sol_to_lamports(0.5) + let default_bootstrap_validator_stake_lamports = &(LAMPORTS_PER_SOL / 2) .max(rent.minimum_balance(StakeStateV2::size_of())) .to_string(); @@ -335,7 +335,10 @@ fn main() -> Result<(), Box> { .value_name("RFC3339 DATE TIME") .validator(is_rfc3339_datetime) .takes_value(true) - .help("Time when the 
bootstrap validator will start the cluster [default: current system time]"), + .help( + "Time when the bootstrap validator will start the cluster [default: current \ + system time]", + ), ) .arg( Arg::with_name("bootstrap_validator") @@ -411,8 +414,8 @@ fn main() -> Result<(), Box> { .takes_value(true) .default_value(default_target_lamports_per_signature) .help( - "The cost in lamports that the cluster will charge for signature \ - verification when the cluster is operating at target-signatures-per-slot", + "The cost in lamports that the cluster will charge for signature verification \ + when the cluster is operating at target-signatures-per-slot", ), ) .arg( @@ -422,8 +425,8 @@ fn main() -> Result<(), Box> { .takes_value(true) .default_value(default_lamports_per_byte_year) .help( - "The cost in lamports that the cluster will charge per byte per year \ - for accounts with data", + "The cost in lamports that the cluster will charge per byte per year for \ + accounts with data", ), ) .arg( @@ -433,8 +436,8 @@ fn main() -> Result<(), Box> { .takes_value(true) .default_value(default_rent_exemption_threshold) .help( - "amount of time (in years) the balance has to include rent for \ - to qualify as rent exempted account", + "amount of time (in years) the balance has to include rent for to qualify as \ + rent exempted account", ), ) .arg( @@ -471,10 +474,10 @@ fn main() -> Result<(), Box> { .takes_value(true) .default_value(default_target_signatures_per_slot) .help( - "Used to estimate the desired processing capacity of the cluster. \ - When the latest slot processes fewer/greater signatures than this \ - value, the lamports-per-signature fee will decrease/increase for \ - the next slot. A value of 0 disables signature-based fee adjustments", + "Used to estimate the desired processing capacity of the cluster. When the \ + latest slot processes fewer/greater signatures than this value, the \ + lamports-per-signature fee will decrease/increase for the next slot. 
A value \ + of 0 disables signature-based fee adjustments", ), ) .arg( @@ -491,10 +494,10 @@ fn main() -> Result<(), Box> { .takes_value(true) .default_value("auto") .help( - "How many PoH hashes to roll before emitting the next tick. \ - If \"auto\", determine based on --target-tick-duration \ - and the hash rate of this computer. If \"sleep\", for development \ - sleep for --target-tick-duration instead of hashing", + "How many PoH hashes to roll before emitting the next tick. If \"auto\", \ + determine based on --target-tick-duration and the hash rate of this \ + computer. If \"sleep\", for development sleep for --target-tick-duration \ + instead of hashing", ), ) .arg( @@ -517,8 +520,8 @@ fn main() -> Result<(), Box> { Arg::with_name("enable_warmup_epochs") .long("enable-warmup-epochs") .help( - "When enabled epochs start short and will grow. \ - Useful for warming up stake quickly during development" + "When enabled epochs start short and will grow. Useful for warming up stake \ + quickly during development", ), ) .arg( @@ -535,7 +538,10 @@ fn main() -> Result<(), Box> { .value_name("FILENAME") .takes_value(true) .multiple(true) - .help("The location of a file containing a list of identity, vote, and stake pubkeys and balances for validator accounts to bake into genesis") + .help( + "The location of a file containing a list of identity, vote, and stake \ + pubkeys and balances for validator accounts to bake into genesis", + ), ) .arg( Arg::with_name("cluster_type") @@ -543,9 +549,7 @@ fn main() -> Result<(), Box> { .possible_values(&ClusterType::STRINGS) .takes_value(true) .default_value(default_cluster_type) - .help( - "Selects the features that will be enabled for the cluster" - ), + .help("Selects the features that will be enabled for the cluster"), ) .arg( Arg::with_name("deactivate_feature") @@ -554,7 +558,10 @@ fn main() -> Result<(), Box> { .value_name("FEATURE_PUBKEY") .validator(is_pubkey) .multiple(true) - .help("Deactivate this feature in genesis. 
Compatible with --cluster-type development"), + .help( + "Deactivate this feature in genesis. Compatible with --cluster-type \ + development", + ), ) .arg( Arg::with_name("max_genesis_archive_unpacked_size") @@ -562,9 +569,7 @@ fn main() -> Result<(), Box> { .value_name("NUMBER") .takes_value(true) .default_value(&default_genesis_archive_unpacked_size) - .help( - "maximum total uncompressed file size of created genesis archive", - ), + .help("maximum total uncompressed file size of created genesis archive"), ) .arg( Arg::with_name("bpf_program") @@ -582,7 +587,10 @@ fn main() -> Result<(), Box> { .takes_value(true) .number_of_values(4) .multiple(true) - .help("Install an upgradeable SBF program at the given address with the given upgrade authority (or \"none\")"), + .help( + "Install an upgradeable SBF program at the given address with the given \ + upgrade authority (or \"none\")", + ), ) .arg( Arg::with_name("inflation") @@ -601,9 +609,8 @@ fn main() -> Result<(), Box> { .global(true) .validator(is_url_or_moniker) .help( - "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]. Used for cloning \ - feature sets", + "URL for Solana's JSON RPC or moniker (or their first letter): [mainnet-beta, \ + testnet, devnet, localhost]. 
Used for cloning feature sets", ), ) .get_matches(); @@ -703,7 +710,6 @@ fn main() -> Result<(), Box> { ); let mut genesis_config = GenesisConfig { - native_instruction_processors: vec![], ticks_per_slot, poh_config, fee_rate_governor, @@ -1168,27 +1174,29 @@ mod tests { #[test] fn test_genesis_account_struct_compatibility() { - let yaml_string_pubkey = "--- -98frSc8R8toHoS3tQ1xWSvHCvGEADRM9hAm5qmUKjSDX: - balance: 4 - owner: Gw6S9CPzR8jHku1QQMdiqcmUKjC2dhJ3gzagWduA6PGw - data: - executable: true -88frSc8R8toHoS3tQ1xWSvHCvGEADRM9hAm5qmUKjSDX: - balance: 3 - owner: Gw7S9CPzR8jHku1QQMdiqcmUKjC2dhJ3gzagWduA6PGw - data: ~ - executable: true -6s36rsNPDfRSvzwek7Ly3mQu9jUMwgqBhjePZMV6Acp4: - balance: 2 - owner: DBC5d45LUHTCrq42ZmCdzc8A8ufwTaiYsL9pZY7KU6TR - data: aGVsbG8= - executable: false -8Y98svZv5sPHhQiPqZvqA5Z5djQ8hieodscvb61RskMJ: - balance: 1 - owner: DSknYr8cPucRbx2VyssZ7Yx3iiRqNGD38VqVahkUvgV1 - data: aGVsbG8gd29ybGQ= - executable: true"; + #[rustfmt::skip] + let yaml_string_pubkey = + "---\ + \n98frSc8R8toHoS3tQ1xWSvHCvGEADRM9hAm5qmUKjSDX:\ + \n balance: 4\ + \n owner: Gw6S9CPzR8jHku1QQMdiqcmUKjC2dhJ3gzagWduA6PGw\ + \n data:\ + \n executable: true\ + \n88frSc8R8toHoS3tQ1xWSvHCvGEADRM9hAm5qmUKjSDX:\ + \n balance: 3\ + \n owner: Gw7S9CPzR8jHku1QQMdiqcmUKjC2dhJ3gzagWduA6PGw\ + \n data: ~\ + \n executable: true\ + \n6s36rsNPDfRSvzwek7Ly3mQu9jUMwgqBhjePZMV6Acp4:\ + \n balance: 2\ + \n owner: DBC5d45LUHTCrq42ZmCdzc8A8ufwTaiYsL9pZY7KU6TR\ + \n data: aGVsbG8=\ + \n executable: false\ + \n8Y98svZv5sPHhQiPqZvqA5Z5djQ8hieodscvb61RskMJ:\ + \n balance: 1\ + \n owner: DSknYr8cPucRbx2VyssZ7Yx3iiRqNGD38VqVahkUvgV1\ + \n data: aGVsbG8gd29ybGQ=\ + \n executable: true"; let tmpfile = tempfile::NamedTempFile::new().unwrap(); let path = tmpfile.path(); @@ -1201,22 +1209,30 @@ mod tests { assert_eq!(genesis_config.accounts.len(), 4); - let yaml_string_keypair = "--- 
-\"[17,12,234,59,35,246,168,6,64,36,169,164,219,96,253,79,238,202,164,160,195,89,9,96,179,117,255,239,32,64,124,66,233,130,19,107,172,54,86,32,119,148,4,39,199,40,122,230,249,47,150,168,163,159,83,233,97,18,25,238,103,25,253,108]\": - balance: 20 - owner: 9ZfsP6Um1KU8d5gNzTsEbSJxanKYp5EPF36qUu4FJqgp - data: Y2F0IGRvZw== - executable: true -\"[36,246,244,43,37,214,110,50,134,148,148,8,205,82,233,67,223,245,122,5,149,232,213,125,244,182,26,29,56,224,70,45,42,163,71,62,222,33,229,54,73,136,53,174,128,103,247,235,222,27,219,129,180,77,225,174,220,74,201,123,97,155,159,234]\": - balance: 15 - owner: F9dmtjJPi8vfLu1EJN4KkyoGdXGmVfSAhxz35Qo9RDCJ - data: bW9ua2V5IGVsZXBoYW50 - executable: false -\"[103,27,132,107,42,149,72,113,24,138,225,109,209,31,158,6,26,11,8,76,24,128,131,215,156,80,251,114,103,220,111,235,56,22,87,5,209,56,53,12,224,170,10,66,82,42,11,138,51,76,120,27,166,200,237,16,200,31,23,5,57,22,131,221]\": - balance: 30 - owner: AwAR5mAbNPbvQ4CvMeBxwWE8caigQoMC2chkWAbh2b9V - data: Y29tYSBtb2Nh - executable: true"; + #[rustfmt::skip] + let yaml_string_keypair = + "---\ + \n\"[17,12,234,59,35,246,168,6,64,36,169,164,219,96,253,79,238,202,164,160,195,89,9,\ + 96,179,117,255,239,32,64,124,66,233,130,19,107,172,54,86,32,119,148,4,39,199,40,122,\ + 230,249,47,150,168,163,159,83,233,97,18,25,238,103,25,253,108]\":\ + \n balance: 20\ + \n owner: 9ZfsP6Um1KU8d5gNzTsEbSJxanKYp5EPF36qUu4FJqgp\ + \n data: Y2F0IGRvZw==\ + \n executable: true\ + \n\"[36,246,244,43,37,214,110,50,134,148,148,8,205,82,233,67,223,245,122,5,149,232,\ + 213,125,244,182,26,29,56,224,70,45,42,163,71,62,222,33,229,54,73,136,53,174,128,103,\ + 247,235,222,27,219,129,180,77,225,174,220,74,201,123,97,155,159,234]\":\ + \n balance: 15\ + \n owner: F9dmtjJPi8vfLu1EJN4KkyoGdXGmVfSAhxz35Qo9RDCJ\ + \n data: bW9ua2V5IGVsZXBoYW50\ + \n executable: false\ + \n\"[103,27,132,107,42,149,72,113,24,138,225,109,209,31,158,6,26,11,8,76,24,128,131,\ + 
215,156,80,251,114,103,220,111,235,56,22,87,5,209,56,53,12,224,170,10,66,82,42,11,138,\ + 51,76,120,27,166,200,237,16,200,31,23,5,57,22,131,221]\":\ + \n balance: 30 + \n owner: AwAR5mAbNPbvQ4CvMeBxwWE8caigQoMC2chkWAbh2b9V + \n data: Y29tYSBtb2Nh + \n executable: true"; let tmpfile = tempfile::NamedTempFile::new().unwrap(); let path = tmpfile.path(); diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs index ca6a7892aac26a..b3dc245c0553b3 100644 --- a/geyser-plugin-interface/src/geyser_plugin_interface.rs +++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs @@ -1,7 +1,7 @@ -/// The interface for Geyser plugins. A plugin must implement -/// the GeyserPlugin trait to work with the runtime. -/// In addition, the dynamic library must export a "C" function _create_plugin which -/// creates the implementation of the plugin. +//! The interface for Geyser plugins. A plugin must implement +//! the GeyserPlugin trait to work with the runtime. +//! In addition, the dynamic library must export a "C" function _create_plugin which +//! creates the implementation of the plugin. 
use { solana_clock::{Slot, UnixTimestamp}, solana_hash::Hash, @@ -11,7 +11,6 @@ use { std::{any::Any, error, io}, thiserror::Error, }; - #[derive(Debug, Clone, PartialEq, Eq)] #[repr(C)] /// Information about an account being updated diff --git a/geyser-plugin-manager/src/accounts_update_notifier.rs b/geyser-plugin-manager/src/accounts_update_notifier.rs index 78064bf4fb1d07..cca45c4a251eb3 100644 --- a/geyser-plugin-manager/src/accounts_update_notifier.rs +++ b/geyser-plugin-manager/src/accounts_update_notifier.rs @@ -14,7 +14,10 @@ use { solana_metrics::*, solana_pubkey::Pubkey, solana_transaction::sanitized::SanitizedTransaction, - std::sync::{Arc, RwLock}, + std::{ + sync::{Arc, RwLock}, + time::Instant, + }, }; #[derive(Debug)] pub(crate) struct AccountsUpdateNotifierImpl { @@ -46,27 +49,29 @@ impl AccountsUpdateNotifierInterface for AccountsUpdateNotifierImpl { write_version: u64, account: &AccountForGeyser<'_>, ) { - let mut measure_all = Measure::start("geyser-plugin-notify-account-restore-all"); - let mut measure_copy = Measure::start("geyser-plugin-copy-stored-account-info"); + // Since the counter increment calls (below) are at Debug log level, + // do not get the time (Instant::now()) unless logging is at Debug level. + // With ~1 billion accounts on mnb, this is a non-negligible amount of work. 
+ let start = log_enabled!(Level::Debug).then(Instant::now); let mut account = self.accountinfo_from_account_for_geyser(account); account.write_version = write_version; - measure_copy.stop(); + let time_copy = log_enabled!(Level::Debug).then(|| start.unwrap().elapsed()); + + self.notify_plugins_of_account_update(account, slot, true); + + let time_all = log_enabled!(Level::Debug).then(|| start.unwrap().elapsed()); inc_new_counter_debug!( "geyser-plugin-copy-stored-account-info-us", - measure_copy.as_us() as usize, + time_copy.unwrap().as_micros() as usize, 100000, 100000 ); - self.notify_plugins_of_account_update(account, slot, true); - - measure_all.stop(); - inc_new_counter_debug!( "geyser-plugin-notify-account-restore-all-us", - measure_all.as_us() as usize, + time_all.unwrap().as_micros() as usize, 100000, 100000 ); diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index 2f55978ca5999d..08354df0440ba2 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -60,12 +60,6 @@ pub struct GeyserPluginManager { } impl GeyserPluginManager { - pub fn new() -> Self { - GeyserPluginManager { - plugins: Vec::default(), - } - } - /// Unload all plugins and loaded plugin libraries, making sure to fire /// their `on_plugin_unload()` methods so they can do any necessary cleanup. 
pub fn unload(&mut self) { @@ -497,7 +491,7 @@ mod tests { #[test] fn test_geyser_reload() { // Initialize empty manager - let plugin_manager = Arc::new(RwLock::new(GeyserPluginManager::new())); + let plugin_manager = Arc::new(RwLock::new(GeyserPluginManager::default())); // No plugins are loaded, this should fail let mut plugin_manager_lock = plugin_manager.write().unwrap(); @@ -536,7 +530,7 @@ mod tests { #[test] fn test_plugin_list() { // Initialize empty manager - let plugin_manager = Arc::new(RwLock::new(GeyserPluginManager::new())); + let plugin_manager = Arc::new(RwLock::new(GeyserPluginManager::default())); let mut plugin_manager_lock = plugin_manager.write().unwrap(); // Load two plugins @@ -558,7 +552,7 @@ mod tests { #[test] fn test_plugin_load_unload() { // Initialize empty manager - let plugin_manager = Arc::new(RwLock::new(GeyserPluginManager::new())); + let plugin_manager = Arc::new(RwLock::new(GeyserPluginManager::default())); let mut plugin_manager_lock = plugin_manager.write().unwrap(); // Load rpc call diff --git a/geyser-plugin-manager/src/geyser_plugin_service.rs b/geyser-plugin-manager/src/geyser_plugin_service.rs index b866470a7e0717..29c9cc03a46f56 100644 --- a/geyser-plugin-manager/src/geyser_plugin_service.rs +++ b/geyser-plugin-manager/src/geyser_plugin_service.rs @@ -79,7 +79,7 @@ impl GeyserPluginService { )>, ) -> Result { info!("Starting GeyserPluginService from config files: {geyser_plugin_config_files:?}"); - let mut plugin_manager = GeyserPluginManager::new(); + let mut plugin_manager = GeyserPluginManager::default(); for geyser_plugin_config_file in geyser_plugin_config_files { Self::load_plugin(&mut plugin_manager, geyser_plugin_config_file)?; diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 89499b26e0d18c..f5897ee9a6c774 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "solana-gossip" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-gossip" 
version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -33,9 +33,11 @@ frozen-abi = [ "solana-vote/frozen-abi", "solana-vote-program/frozen-abi", ] +agave-unstable-api = ["agave-low-pass-filter/agave-unstable-api"] [dependencies] agave-feature-set = { workspace = true } +agave-low-pass-filter = { workspace = true, features = ["agave-unstable-api"] } arrayvec = { workspace = true } assert_matches = { workspace = true } bincode = { workspace = true } @@ -56,44 +58,46 @@ serde-big-array = { workspace = true } serde_bytes = { workspace = true } serde_derive = { workspace = true } siphasher = { workspace = true } +solana-account = { workspace = true } solana-bloom = { workspace = true } solana-clap-utils = { workspace = true } solana-client = { workspace = true } -solana-clock = "=2.2.2" +solana-clock = "=3.0.0" +solana-cluster-type = "=3.0.0" solana-connection-cache = { workspace = true } solana-entry = { workspace = true } -solana-epoch-schedule = "=2.2.1" -solana-frozen-abi = { version = "=2.3.0", optional = true, features = [ +solana-epoch-schedule = "=3.0.0" +solana-frozen-abi = { version = "=3.0.0", optional = true, features = [ "frozen-abi", ] } -solana-frozen-abi-macro = { version = "=2.2.1", optional = true, features = [ +solana-frozen-abi-macro = { version = "=3.0.0", optional = true, features = [ "frozen-abi", ] } -solana-hash = "=2.3.0" -solana-keypair = "=2.2.1" -solana-ledger = { workspace = true } -solana-logger = "=2.3.1" +solana-hash = "=3.0.0" +solana-keypair = "=3.0.1" +solana-ledger = { workspace = true, features = ["agave-unstable-api"] } +solana-logger = "=3.0.0" solana-measure = { workspace = true } solana-metrics = { workspace = true } -solana-native-token = "=2.2.2" -solana-net-utils = { workspace = true } -solana-packet = "=2.2.1" +solana-native-token = "=3.0.0" +solana-net-utils = { workspace = true, 
features = ["agave-unstable-api"] } +solana-packet = "=3.0.0" solana-perf = { workspace = true } -solana-pubkey = { version = "=2.4.0", features = ["rand"] } -solana-quic-definitions = "=2.3.0" +solana-pubkey = { version = "=3.0.0", features = ["rand"] } +solana-quic-definitions = "=3.0.0" solana-rayon-threadlimit = { workspace = true } solana-rpc-client = { workspace = true } solana-runtime = { workspace = true } -solana-sanitize = "=2.2.1" -solana-serde-varint = "=2.2.2" -solana-sha256-hasher = "=2.3.0" -solana-short-vec = "=2.2.1" -solana-signature = { version = "=2.3.0", default-features = false } -solana-signer = "=2.2.1" +solana-sanitize = "=3.0.1" +solana-serde-varint = "=3.0.0" +solana-sha256-hasher = "=3.0.0" +solana-short-vec = "=3.0.0" +solana-signature = { version = "=3.1.0", default-features = false } +solana-signer = "=3.0.0" solana-streamer = { workspace = true } -solana-time-utils = "=2.2.1" +solana-time-utils = "=3.0.0" solana-tpu-client = { workspace = true } -solana-transaction = "=2.2.3" +solana-transaction = "=3.0.1" solana-version = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } @@ -111,7 +115,7 @@ serial_test = { workspace = true } solana-net-utils = { workspace = true, features = ["dev-context-only-utils"] } solana-perf = { workspace = true, features = ["dev-context-only-utils"] } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } -solana-signature = { version = "=2.3.0", features = ["rand"] } +solana-signature = { version = "=3.1.0", features = ["rand"] } solana-system-transaction = { workspace = true } solana-vote-interface = { workspace = true } static_assertions = { workspace = true } diff --git a/gossip/benches/crds_shards.rs b/gossip/benches/crds_shards.rs index d2d3f9a24d71fe..3299a505716ff5 100644 --- a/gossip/benches/crds_shards.rs +++ b/gossip/benches/crds_shards.rs @@ -31,7 +31,7 @@ fn bench_crds_shards_find(c: &mut Criterion, num_values: usize, 
mask_bits: u32) assert!(shards.insert(index, value)); } c.bench_function( - &format!("bench_crds_shards_find: mask_bits: {:?}", mask_bits), + &format!("bench_crds_shards_find: mask_bits: {mask_bits:?}"), |b| { b.iter(|| { let mask = rng.gen(); diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 4a0762a452f881..8bdbf01b255d57 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -19,7 +19,7 @@ use { contact_info::{self, ContactInfo, ContactInfoQuery, Error as ContactInfoError}, crds::{Crds, Cursor, GossipRoute}, crds_data::{self, CrdsData, EpochSlotsIndex, LowestSlot, SnapshotHashes, Vote, MAX_VOTES}, - crds_filter::{should_retain_crds_value, GossipFilterDirection, MIN_STAKE_TO_SKIP_PING}, + crds_filter::{should_retain_crds_value, GossipFilterDirection}, crds_gossip::CrdsGossip, crds_gossip_error::CrdsGossipError, crds_gossip_pull::{ @@ -52,8 +52,10 @@ use { solana_keypair::{signable::Signable, Keypair}, solana_ledger::shred::Shred, solana_net_utils::{ - bind_in_range, bind_to_unspecified, sockets::bind_gossip_port_in_range, PortRange, - VALIDATOR_PORT_RANGE, + bind_in_range, + multihomed_sockets::BindIpAddrs, + sockets::{bind_gossip_port_in_range, bind_to_localhost_unique}, + PortRange, VALIDATOR_PORT_RANGE, }, solana_perf::{ data_budget::DataBudget, @@ -61,12 +63,11 @@ use { }, solana_pubkey::Pubkey, solana_rayon_threadlimit::get_thread_count, - solana_runtime::bank_forks::BankForks, + solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sanitize::Sanitize, solana_signature::Signature, solana_signer::Signer, solana_streamer::{ - atomic_udp_socket::AtomicUdpSocket, packet, socket::SocketAddrSpace, streamer::{ChannelSend, PacketBatchReceiver}, @@ -83,7 +84,7 @@ use { iter::repeat, net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener, UdpSocket}, num::NonZeroUsize, - ops::{Deref, Div}, + ops::Div, path::{Path, PathBuf}, rc::Rc, result::Result, @@ -129,6 +130,8 @@ pub const DEFAULT_CONTACT_DEBUG_INTERVAL_MILLIS: u64 = 
10_000; pub const DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS: u64 = 60_000; // Limit number of unique pubkeys in the crds table. pub(crate) const CRDS_UNIQUE_PUBKEY_CAPACITY: usize = 8192; +// Interval between push active set refreshes. +pub const REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS: u64 = CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2; // Must have at least one socket to monitor the TVU port pub const MINIMUM_NUM_TVU_RECEIVE_SOCKETS: NonZeroUsize = NonZeroUsize::new(1).unwrap(); @@ -166,6 +169,7 @@ pub struct ClusterInfo { contact_save_interval: u64, // milliseconds, 0 = disabled contact_info_path: PathBuf, socket_addr_space: SocketAddrSpace, + bind_ip_addrs: Arc, } impl ClusterInfo { @@ -194,6 +198,7 @@ impl ClusterInfo { contact_info_path: PathBuf::default(), contact_save_interval: 0, // disabled socket_addr_space, + bind_ip_addrs: Arc::new(BindIpAddrs::default()), }; me.refresh_my_gossip_contact_info(); me @@ -207,12 +212,21 @@ impl ClusterInfo { &self.socket_addr_space } + pub fn set_bind_ip_addrs(&mut self, ip_addrs: Arc) { + self.bind_ip_addrs = ip_addrs; + } + + pub fn bind_ip_addrs(&self) -> Arc { + self.bind_ip_addrs.clone() + } + fn refresh_push_active_set( &self, recycler: &PacketBatchRecycler, stakes: &HashMap, gossip_validators: Option<&HashSet>, sender: &impl ChannelSend, + maybe_bank_ref: Option<&Bank>, ) { let shred_version = self.my_contact_info.read().unwrap().shred_version(); let self_keypair: Arc = self.keypair().clone(); @@ -225,6 +239,7 @@ impl ClusterInfo { &self.ping_cache, &mut pings, &self.socket_addr_space, + maybe_bank_ref, ); let pings = pings .into_iter() @@ -394,6 +409,15 @@ impl ClusterInfo { Ok(()) } + pub fn set_tvu_socket(&self, tvu_addr: SocketAddr) -> Result<(), ContactInfoError> { + self.my_contact_info + .write() + .unwrap() + .set_tvu(contact_info::Protocol::UDP, tvu_addr)?; + self.refresh_my_gossip_contact_info(); + Ok(()) + } + pub fn set_tpu(&self, tpu_addr: SocketAddr) -> Result<(), ContactInfoError> { 
self.my_contact_info.write().unwrap().set_tpu(tpu_addr)?; self.refresh_my_gossip_contact_info(); @@ -409,6 +433,19 @@ impl ClusterInfo { Ok(()) } + pub fn set_tpu_vote( + &self, + protocol: contact_info::Protocol, + tpu_vote_addr: SocketAddr, + ) -> Result<(), ContactInfoError> { + self.my_contact_info + .write() + .unwrap() + .set_tpu_vote(protocol, tpu_vote_addr)?; + self.refresh_my_gossip_contact_info(); + Ok(()) + } + pub fn lookup_contact_info( &self, id: &Pubkey, @@ -472,7 +509,7 @@ impl ClusterInfo { .rpc() .filter(|addr| self.socket_addr_space.check(addr))?; let node_version = self.get_node_version(node.pubkey()); - if node.shred_version() != 0 && node.shred_version() != my_shred_version { + if node.shred_version() != my_shred_version { return None; } let rpc_addr = node_rpc.ip(); @@ -527,7 +564,7 @@ impl ClusterInfo { } let node_version = self.get_node_version(node.pubkey()); - if node.shred_version() != 0 && node.shred_version() != my_shred_version { + if node.shred_version() != my_shred_version { different_shred_nodes = different_shred_nodes.saturating_add(1); None } else { @@ -1020,14 +1057,17 @@ impl ClusterInfo { .unwrap_or_default() } - /// all validators that have a valid rpc port regardless of `shred_version`. - pub fn all_rpc_peers(&self) -> Vec { + /// all validators that have a valid rpc port and are on the same `shred_version`. 
+ pub fn rpc_peers(&self) -> Vec { let self_pubkey = self.id(); + let self_shred_version = self.my_shred_version(); let gossip_crds = self.gossip.crds.read().unwrap(); gossip_crds .get_nodes_contact_info() .filter(|node| { - node.pubkey() != &self_pubkey && self.check_socket_addr_space(&node.rpc()) + node.pubkey() != &self_pubkey + && self.check_socket_addr_space(&node.rpc()) + && node.shred_version() == self_shred_version }) .cloned() .collect() @@ -1035,20 +1075,29 @@ impl ClusterInfo { // All nodes in gossip (including spy nodes) and the last time we heard about them pub fn all_peers(&self) -> Vec<(ContactInfo, u64)> { + let self_shred_version = self.my_shred_version(); let gossip_crds = self.gossip.crds.read().unwrap(); gossip_crds .get_nodes() - .map(|x| (x.value.contact_info().unwrap().clone(), x.local_timestamp)) + .filter_map(|node| { + let contact_info = node.value.contact_info()?; + (contact_info.shred_version() == self_shred_version) + .then(|| (contact_info.clone(), node.local_timestamp)) + }) .collect() } pub fn gossip_peers(&self) -> Vec { let me = self.id(); + let self_shred_version = self.my_shred_version(); let gossip_crds = self.gossip.crds.read().unwrap(); gossip_crds .get_nodes_contact_info() - // shred_version not considered for gossip peers (ie, spy nodes do not set shred_version) - .filter(|node| node.pubkey() != &me && self.check_socket_addr_space(&node.gossip())) + .filter(|node| { + node.pubkey() != &me + && self.check_socket_addr_space(&node.gossip()) + && node.shred_version() == self_shred_version + }) .cloned() .collect() } @@ -1426,7 +1475,7 @@ impl ClusterInfo { .thread_name(|i| format!("solGossipRun{i:02}")) .build() .unwrap(); - let mut epoch_specs = bank_forks.map(EpochSpecs::from); + let mut epoch_specs = bank_forks.clone().map(EpochSpecs::from); Builder::new() .name("solGossip".to_string()) .spawn(move || { @@ -1482,13 +1531,19 @@ impl ClusterInfo { entrypoints_processed = entrypoints_processed || self.process_entrypoints(); 
//TODO: possibly tune this parameter //we saw a deadlock passing an self.read().unwrap().timeout into sleep - if start - last_push > CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 { + if start - last_push > REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS { + let maybe_bank = bank_forks + .as_ref() + .and_then(|bf| bf.read().ok()) + .map(|forks| forks.root_bank()); + let maybe_bank_ref = maybe_bank.as_deref(); self.refresh_my_gossip_contact_info(); self.refresh_push_active_set( &recycler, &stakes, gossip_validators.as_ref(), &sender, + maybe_bank_ref, ); last_push = timestamp(); } @@ -1970,7 +2025,6 @@ impl ClusterInfo { &mut rng, &keypair, value, - stakes, &self.socket_addr_space, &self.ping_cache, &mut pings, @@ -2314,7 +2368,7 @@ impl ClusterInfo { #[derive(Debug)] pub struct Sockets { - pub gossip: AtomicUdpSocket, + pub gossip: Arc<[UdpSocket]>, pub ip_echo: Option, pub tvu: Vec, pub tvu_quic: UdpSocket, @@ -2342,7 +2396,9 @@ pub struct Sockets { /// Client-side socket for ForwardingStage vote transactions pub tpu_vote_forwarding_client: UdpSocket, /// Client-side socket for ForwardingStage non-vote transactions - pub tpu_transaction_forwarding_client: UdpSocket, + pub tpu_transaction_forwarding_clients: Box<[UdpSocket]>, + /// Socket for alpenglow consensus logic + pub alpenglow: Option, /// Connection cache endpoint for QUIC-based Vote pub quic_vote_client: UdpSocket, /// Client-side socket for RPC/SendTransactionService. 
@@ -2357,7 +2413,7 @@ pub struct NodeConfig { pub gossip_port: u16, pub port_range: PortRange, /// Multihoming: The IP addresses the node can bind to - pub bind_ip_addrs: BindIpAddrs, + pub bind_ip_addrs: Arc, pub public_tpu_addr: Option, pub public_tpu_forwards_addr: Option, pub vortexor_receiver_addr: Option, @@ -2370,57 +2426,6 @@ pub struct NodeConfig { pub num_quic_endpoints: NonZeroUsize, } -#[derive(Debug, Clone)] -pub struct BindIpAddrs { - /// The IP addresses this node may bind to - /// Index 0 is the primary address - /// Index 1+ are secondary addresses - addrs: Vec, -} - -impl BindIpAddrs { - pub fn new(addrs: Vec) -> Result { - if addrs.is_empty() { - return Err( - "BindIpAddrs requires at least one IP address (--bind-address)".to_string(), - ); - } - if addrs.len() > 1 { - for ip in &addrs { - if ip.is_loopback() || ip.is_unspecified() || ip.is_multicast() { - return Err(format!( - "Invalid configuration: {ip:?} is not allowed with multiple \ - --bind-address values (loopback, unspecified, or multicast)" - )); - } - } - } - - Ok(Self { addrs }) - } - - #[inline] - pub fn primary(&self) -> IpAddr { - self.addrs[0] - } -} - -// Makes BindIpAddrs behave like &[IpAddr] -impl Deref for BindIpAddrs { - type Target = [IpAddr]; - - fn deref(&self) -> &Self::Target { - &self.addrs - } -} - -// For generic APIs expecting something like AsRef<[IpAddr]> -impl AsRef<[IpAddr]> for BindIpAddrs { - fn as_ref(&self) -> &[IpAddr] { - &self.addrs - } -} - pub fn push_messages_to_peer_for_tests( messages: Vec, self_id: Pubkey, @@ -2435,7 +2440,7 @@ pub fn push_messages_to_peer_for_tests( &PacketBatchRecycler::default(), &GossipStats::default(), ); - let sock = bind_to_unspecified().unwrap(); + let sock = bind_to_localhost_unique().expect("should bind"); packet::send_to(&packet_batch, &sock, socket_addr_space)?; Ok(()) } @@ -2455,23 +2460,15 @@ fn check_pull_request_shred_version(self_shred_version: u16, caller: &CrdsValue) // Discards CrdsValues in PushMessages and 
PullResponses from nodes with // different shred-version. -// ContactInfos are always exempted from shred-version check in order to: -// * Allow nodes to update their shred-version. -// * Prevent two running instances of the same identity key from -// cross-contaminating gossip across clusters; see check_duplicate_instance. fn discard_different_shred_version( msg: &mut Protocol, self_shred_version: u16, crds: &Crds, stats: &GossipStats, ) { - let (from, values, skip_shred_version_counter) = match msg { - Protocol::PullResponse(from, values) => { - (from, values, &stats.skip_pull_response_shred_version) - } - Protocol::PushMessage(from, values) => { - (from, values, &stats.skip_push_message_shred_version) - } + let (values, skip_shred_version_counter) = match msg { + Protocol::PullResponse(_, values) => (values, &stats.skip_pull_response_shred_version), + Protocol::PushMessage(_, values) => (values, &stats.skip_push_message_shred_version), // Shred-version on pull-request callers can be checked without a lock // on CRDS table and is so verified separately (by // check_pull_request_shred_version). @@ -2482,16 +2479,10 @@ fn discard_different_shred_version( } }; let num_values = values.len(); - if crds.get_shred_version(from) == Some(self_shred_version) { - // Retain ContactInfos or values with the same shred version. - values.retain(|value| { - matches!(value.data(), CrdsData::ContactInfo(_)) - || crds.get_shred_version(&value.pubkey()) == Some(self_shred_version) - }) - } else { - // Only retain ContactInfos. 
- values.retain(|value| matches!(value.data(), CrdsData::ContactInfo(_))); - } + values.retain(|value| match value.data() { + CrdsData::ContactInfo(ci) => ci.shred_version() == self_shred_version, + _ => crds.get_shred_version(&value.pubkey()) == Some(self_shred_version), + }); let num_skipped = num_values - values.len(); if num_skipped != 0 { skip_shred_version_counter.add_relaxed(num_skipped as u64); @@ -2517,7 +2508,6 @@ fn verify_gossip_addr( rng: &mut R, keypair: &Keypair, value: &CrdsValue, - stakes: &HashMap, socket_addr_space: &SocketAddrSpace, ping_cache: &Mutex, pings: &mut Vec<(SocketAddr, Ping)>, @@ -2527,10 +2517,6 @@ fn verify_gossip_addr( CrdsData::LegacyContactInfo(node) => (node.pubkey(), node.gossip()), _ => return true, // If not a contact-info, nothing to verify. }; - // For (sufficiently) staked nodes, don't bother with ping/pong. - if stakes.get(pubkey).copied() >= Some(MIN_STAKE_TO_SKIP_PING) { - return true; - } // Invalid addresses are not verifiable. let Some(addr) = addr.filter(|addr| socket_addr_space.check(addr)) else { return false; @@ -2621,6 +2607,7 @@ mod tests { std::{ iter::repeat_with, net::{IpAddr, Ipv4Addr}, + ops::Deref, panic, sync::Arc, }, @@ -2821,6 +2808,7 @@ mod tests { &cluster_info.ping_cache, &mut Vec::new(), // pings &SocketAddrSpace::Unspecified, + None, ); let mut reqs = cluster_info.generate_new_gossip_requests( &thread_pool, @@ -2859,26 +2847,33 @@ mod tests { assert!(x < range.1); } - fn check_sockets(sockets: &[UdpSocket], ip: IpAddr, range: (u16, u16)) { + fn check_sockets(sockets: &[T], ip: IpAddr, range: (u16, u16)) + where + T: Borrow, + { assert!(!sockets.is_empty()); - let port = sockets[0].local_addr().unwrap().port(); - for socket in sockets.iter() { - check_socket(socket, ip, range); - assert_eq!(socket.local_addr().unwrap().port(), port); + let port = sockets[0].borrow().local_addr().unwrap().port(); + for s in sockets { + let s = s.borrow(); + let local_addr = s.local_addr().unwrap(); + 
assert_eq!(local_addr.ip(), ip); + assert_in_range(local_addr.port(), range); + assert_eq!(local_addr.port(), port); } } - fn check_socket(socket: &UdpSocket, ip: IpAddr, range: (u16, u16)) { - let local_addr = socket.local_addr().unwrap(); - assert_eq!(local_addr.ip(), ip); - assert_in_range(local_addr.port(), range); + fn check_socket(socket: &T, ip: IpAddr, range: (u16, u16)) + where + T: Borrow, + { + check_sockets(std::slice::from_ref(socket), ip, range); } fn check_node_sockets(node: &Node, ip: IpAddr, range: (u16, u16)) { - check_socket(&node.sockets.gossip.load(), ip, range); check_socket(&node.sockets.repair, ip, range); check_socket(&node.sockets.tvu_quic, ip, range); + check_sockets(&node.sockets.gossip, ip, range); check_sockets(&node.sockets.tvu, ip, range); check_sockets(&node.sockets.tpu, ip, range); } @@ -2891,7 +2886,7 @@ mod tests { advertised_ip: IpAddr::V4(ip), gossip_port: 0, port_range, - bind_ip_addrs: BindIpAddrs::new(vec![IpAddr::V4(ip)]).unwrap(), + bind_ip_addrs: Arc::new(BindIpAddrs::new(vec![IpAddr::V4(ip)]).unwrap()), public_tpu_addr: None, public_tpu_forwards_addr: None, num_tvu_receive_sockets: MINIMUM_NUM_TVU_RECEIVE_SOCKETS, @@ -2916,7 +2911,7 @@ mod tests { advertised_ip: ip, gossip_port: port, port_range, - bind_ip_addrs: BindIpAddrs::new(vec![ip]).unwrap(), + bind_ip_addrs: Arc::new(BindIpAddrs::new(vec![ip]).unwrap()), public_tpu_addr: None, public_tpu_forwards_addr: None, num_tvu_receive_sockets: MINIMUM_NUM_TVU_RECEIVE_SOCKETS, @@ -2928,8 +2923,7 @@ mod tests { let node = Node::new_with_external_ip(&solana_pubkey::new_rand(), config); check_node_sockets(&node, ip, port_range); - - assert_eq!(node.sockets.gossip.local_addr().unwrap().port(), port); + check_sockets(&node.sockets.gossip, ip, port_range); } //test that all cluster_info objects only generate signed messages @@ -2962,6 +2956,7 @@ mod tests { &cluster_info.ping_cache, &mut Vec::new(), // pings &SocketAddrSpace::Unspecified, + None, ); //check that all types of 
gossip messages are signed correctly cluster_info.flush_push_queue(); @@ -3783,21 +3778,27 @@ mod tests { fn test_contact_trace() { solana_logger::setup(); let keypair43 = Arc::new( - Keypair::from_bytes(&[ - 198, 203, 8, 178, 196, 71, 119, 152, 31, 96, 221, 142, 115, 224, 45, 34, 173, 138, - 254, 39, 181, 238, 168, 70, 183, 47, 210, 91, 221, 179, 237, 153, 14, 58, 154, 59, - 67, 220, 235, 106, 241, 99, 4, 72, 60, 245, 53, 30, 225, 122, 145, 225, 8, 40, 30, - 174, 26, 228, 125, 127, 125, 21, 96, 28, - ]) + Keypair::try_from( + [ + 198, 203, 8, 178, 196, 71, 119, 152, 31, 96, 221, 142, 115, 224, 45, 34, 173, + 138, 254, 39, 181, 238, 168, 70, 183, 47, 210, 91, 221, 179, 237, 153, 14, 58, + 154, 59, 67, 220, 235, 106, 241, 99, 4, 72, 60, 245, 53, 30, 225, 122, 145, + 225, 8, 40, 30, 174, 26, 228, 125, 127, 125, 21, 96, 28, + ] + .as_ref(), + ) .unwrap(), ); let keypair44 = Arc::new( - Keypair::from_bytes(&[ - 66, 88, 3, 70, 228, 215, 125, 64, 130, 183, 180, 98, 22, 166, 201, 234, 89, 80, - 135, 24, 228, 35, 20, 52, 105, 130, 50, 51, 46, 229, 244, 108, 70, 57, 45, 247, 57, - 177, 39, 126, 190, 238, 50, 96, 186, 208, 28, 168, 148, 56, 9, 106, 92, 213, 63, - 205, 252, 225, 244, 101, 77, 182, 4, 2, - ]) + Keypair::try_from( + [ + 66, 88, 3, 70, 228, 215, 125, 64, 130, 183, 180, 98, 22, 166, 201, 234, 89, 80, + 135, 24, 228, 35, 20, 52, 105, 130, 50, 51, 46, 229, 244, 108, 70, 57, 45, 247, + 57, 177, 39, 126, 190, 238, 50, 96, 186, 208, 28, 168, 148, 56, 9, 106, 92, + 213, 63, 205, 252, 225, 244, 101, 77, 182, 4, 2, + ] + .as_ref(), + ) .unwrap(), ); @@ -3830,4 +3831,166 @@ mod tests { info!("rpc:\n{trace}"); assert_eq!(trace.len(), 335); } + + #[test] + fn test_discard_different_shred_version_push_message() { + let self_shred_version = 5555; + let mut crds = Crds::default(); + let stats = GossipStats::default(); + let mut rng = rand::thread_rng(); + let keypair = Keypair::new(); + + // create contact info with matching shred version + let contact_info = ContactInfo::new( 
+ keypair.pubkey(), + /*wallclock:*/ 1234567890, + self_shred_version, + ); + let ci = CrdsValue::new(CrdsData::ContactInfo(contact_info), &keypair); + + // Test push message with matching shred version + let mut msg = Protocol::PushMessage(keypair.pubkey(), vec![ci.clone()]); + discard_different_shred_version(&mut msg, self_shred_version, &crds, &stats); + if let Protocol::PushMessage(_, values) = msg { + assert_eq!(values.len(), 1); + } + + let contact_info_wrong_shred_version = + ContactInfo::new(keypair.pubkey(), /*wallclock:*/ 1234567890, 1); + let ci_wrong_shred_version = CrdsValue::new( + CrdsData::ContactInfo(contact_info_wrong_shred_version), + &keypair, + ); + + // Test push message with non-matching shred version + let mut msg = Protocol::PushMessage(keypair.pubkey(), vec![ci_wrong_shred_version]); + discard_different_shred_version(&mut msg, self_shred_version, &crds, &stats); + if let Protocol::PushMessage(_, values) = msg { + assert_eq!(values.len(), 0); + } + + // Test EpochSlot w/o previous CI with matching shred version/pubkey -> should be rejected + let epoch_slots = EpochSlots::new_rand(&mut rng, Some(keypair.pubkey())); + let es = CrdsValue::new_unsigned(CrdsData::EpochSlots(0, epoch_slots)); + let mut msg = Protocol::PushMessage(keypair.pubkey(), vec![es]); + discard_different_shred_version(&mut msg, self_shred_version, &crds, &stats); + if let Protocol::PushMessage(_, ref values) = msg { + assert_eq!(values.len(), 0); + } + + // Insert ContactInfo with different pubkey than EpochSlot + let keypair2 = Keypair::new(); + let ci_wrong_pubkey = CrdsValue::new( + CrdsData::ContactInfo(ContactInfo::new( + keypair2.pubkey(), + /*wallclock:*/ 1234567890, + self_shred_version, + )), + &keypair2, + ); + assert!(crds + .insert(ci_wrong_pubkey, /*now=*/ 0, GossipRoute::LocalMessage) + .is_ok()); + + // Test insert EpochSlot w/ previous ContactInfo w/ matching shred version but different pubkey -> should be rejected + let epoch_slots = 
EpochSlots::new_rand(&mut rng, Some(keypair.pubkey())); + let es: CrdsValue = CrdsValue::new_unsigned(CrdsData::EpochSlots(0, epoch_slots)); + let mut msg = Protocol::PushMessage(keypair.pubkey(), vec![es.clone()]); + discard_different_shred_version(&mut msg, self_shred_version, &crds, &stats); + if let Protocol::PushMessage(_, ref values) = msg { + assert_eq!(values.len(), 0); + } + + // Now insert ContactInfo with same pubkey as EpochSlot + assert!(crds + .insert(ci.clone(), /*now=*/ 0, GossipRoute::LocalMessage) + .is_ok()); + + let mut msg = Protocol::PushMessage(keypair.pubkey(), vec![es]); + discard_different_shred_version(&mut msg, self_shred_version, &crds, &stats); + if let Protocol::PushMessage(_, ref values) = msg { + assert_eq!(values.len(), 1); + } + + // Test multiple ContactInfo/EpochSlot with various shred versions. Crds table contains ContactInfo from `keypair` + let keypair3 = Keypair::new(); + let keypair4 = Keypair::new(); + let entries = vec![ + CrdsValue::new( + CrdsData::ContactInfo(ContactInfo::new( + keypair2.pubkey(), + /*wallclock:*/ 1234567890, + self_shred_version, + )), + &keypair2, + ), + CrdsValue::new( + CrdsData::ContactInfo(ContactInfo::new( + keypair3.pubkey(), + /*wallclock:*/ 1234567890, + 1, + )), + &keypair3, + ), + CrdsValue::new_unsigned(CrdsData::EpochSlots( + 0, + EpochSlots::new_rand(&mut rng, Some(keypair.pubkey())), + )), + CrdsValue::new( + CrdsData::ContactInfo(ContactInfo::new( + keypair.pubkey(), + /*wallclock:*/ 1234567890, + self_shred_version, + )), + &keypair4, + ), + ]; + let mut msg = Protocol::PushMessage(keypair.pubkey(), entries); + discard_different_shred_version(&mut msg, self_shred_version, &crds, &stats); + if let Protocol::PushMessage(_, ref values) = msg { + // Only reject ContactInfo with invalid shred version. 
EpochSlot with associated ContactInfo is already in the table + assert_eq!(values.len(), 3); + } + + // Remove ContactInfo with matching pubkey as EpochSlot + crds.remove(&ci.label(), /* now */ 0); + + // Test multiple ContactInfo with various shred versions. Crds table is empty + let entries = vec![ + CrdsValue::new( + CrdsData::ContactInfo(ContactInfo::new( + keypair2.pubkey(), + /*wallclock:*/ 1234567890, + self_shred_version, + )), + &keypair2, + ), + CrdsValue::new( + CrdsData::ContactInfo(ContactInfo::new( + keypair3.pubkey(), + /*wallclock:*/ 1234567890, + 1, + )), + &keypair3, + ), + CrdsValue::new_unsigned(CrdsData::EpochSlots( + 0, + EpochSlots::new_rand(&mut rng, Some(keypair.pubkey())), + )), + CrdsValue::new( + CrdsData::ContactInfo(ContactInfo::new( + keypair.pubkey(), + /*wallclock:*/ 1234567890, + self_shred_version, + )), + &keypair, + ), + ]; + let mut msg = Protocol::PushMessage(keypair.pubkey(), entries); + discard_different_shred_version(&mut msg, self_shred_version, &crds, &stats); + if let Protocol::PushMessage(_, ref values) = msg { + // Reject ContactInfo with invalid shred version and EpochSlot with no associated ContactInfo in the table + assert_eq!(values.len(), 2); + } + } } diff --git a/gossip/src/crds_filter.rs b/gossip/src/crds_filter.rs index 973a5020704cc1..4ced79a37acfc8 100644 --- a/gossip/src/crds_filter.rs +++ b/gossip/src/crds_filter.rs @@ -16,8 +16,6 @@ const MIN_NUM_STAKED_NODES: usize = 500; /// Minimum stake that a node should have so that all its CRDS values are /// propagated through gossip (below this only subset of CRDS is propagated). pub(crate) const MIN_STAKE_FOR_GOSSIP: u64 = solana_native_token::LAMPORTS_PER_SOL; -/// Minimum stake required for a node to bypass the initial ping check when joining gossip. -pub(crate) const MIN_STAKE_TO_SKIP_PING: u64 = 1000 * solana_native_token::LAMPORTS_PER_SOL; /// Returns false if the CRDS value should be discarded. 
/// `direction` controls whether we are looking at diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index d13d80225295e9..dac8b177005034 100644 --- a/gossip/src/crds_gossip.rs +++ b/gossip/src/crds_gossip.rs @@ -27,6 +27,7 @@ use { solana_keypair::Keypair, solana_ledger::shred::Shred, solana_pubkey::Pubkey, + solana_runtime::bank::Bank, solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, solana_time_utils::timestamp, @@ -186,6 +187,7 @@ impl CrdsGossip { ping_cache: &Mutex, pings: &mut Vec<(SocketAddr, Ping)>, socket_addr_space: &SocketAddrSpace, + maybe_bank_ref: Option<&Bank>, ) { self.push.refresh_push_active_set( &self.crds, @@ -196,6 +198,7 @@ impl CrdsGossip { ping_cache, pings, socket_addr_space, + maybe_bank_ref, ) } @@ -445,6 +448,7 @@ mod test { &ping_cache, &mut Vec::new(), // pings &SocketAddrSpace::Unspecified, + None, ); let now = timestamp(); //incorrect dest diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs index d78d2e3da3d42f..665f4b42c16700 100644 --- a/gossip/src/crds_gossip_push.rs +++ b/gossip/src/crds_gossip_push.rs @@ -21,10 +21,13 @@ use { protocol::{Ping, PingCache}, push_active_set::PushActiveSet, received_cache::ReceivedCache, + stake_weighting_config::{get_gossip_config_from_account, WeightingConfig}, }, itertools::Itertools, + solana_cluster_type::ClusterType, solana_keypair::Keypair, solana_pubkey::Pubkey, + solana_runtime::bank::Bank, solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, solana_time_utils::timestamp, @@ -49,6 +52,7 @@ const CRDS_GOSSIP_PRUNE_MSG_TIMEOUT_MS: u64 = 500; const CRDS_GOSSIP_PRUNE_STAKE_THRESHOLD_PCT: f64 = 0.15; const CRDS_GOSSIP_PRUNE_MIN_INGRESS_NODES: usize = 2; const CRDS_GOSSIP_PUSH_ACTIVE_SET_SIZE: usize = CRDS_GOSSIP_PUSH_FANOUT + 3; +const CONFIG_REFRESH_INTERVAL_MS: u64 = 60_000; pub struct CrdsGossipPush { /// Active set of validators for push @@ -65,12 +69,13 @@ pub struct CrdsGossipPush { pub num_total: AtomicUsize, pub 
num_old: AtomicUsize, pub num_pushes: AtomicUsize, + last_cfg_poll_ms: Mutex, } impl Default for CrdsGossipPush { fn default() -> Self { Self { - active_set: RwLock::default(), + active_set: RwLock::new(PushActiveSet::new_static()), crds_cursor: Mutex::default(), received_cache: Mutex::new(ReceivedCache::new(2 * CRDS_UNIQUE_PUBKEY_CAPACITY)), push_fanout: CRDS_GOSSIP_PUSH_FANOUT, @@ -79,6 +84,7 @@ impl Default for CrdsGossipPush { num_total: AtomicUsize::default(), num_old: AtomicUsize::default(), num_pushes: AtomicUsize::default(), + last_cfg_poll_ms: Mutex::new(0), } } } @@ -238,6 +244,28 @@ impl CrdsGossipPush { active_set.prune(self_pubkey, peer, origins, stakes); } + fn maybe_refresh_weighting_config( + &self, + maybe_bank_ref: Option<&Bank>, + now_ms: u64, + ) -> Option { + let bank = maybe_bank_ref?; + if !matches!( + bank.cluster_type(), + ClusterType::Testnet | ClusterType::Development + ) { + return None; + } + { + let mut last = self.last_cfg_poll_ms.lock().unwrap(); + if now_ms.saturating_sub(*last) < CONFIG_REFRESH_INTERVAL_MS { + return None; + } + *last = now_ms; + } + get_gossip_config_from_account(bank) + } + /// Refresh the push active set. #[allow(clippy::too_many_arguments)] pub(crate) fn refresh_push_active_set( @@ -250,6 +278,7 @@ impl CrdsGossipPush { ping_cache: &Mutex, pings: &mut Vec<(SocketAddr, Ping)>, socket_addr_space: &SocketAddrSpace, + maybe_bank_ref: Option<&Bank>, ) { let mut rng = rand::thread_rng(); // Active and valid gossip nodes with matching shred-version. 
@@ -280,13 +309,18 @@ impl CrdsGossipPush { return; } let cluster_size = crds.read().unwrap().num_pubkeys().max(stakes.len()); + let maybe_cfg = self.maybe_refresh_weighting_config(maybe_bank_ref, timestamp()); let mut active_set = self.active_set.write().unwrap(); + if let Some(cfg) = maybe_cfg { + active_set.apply_cfg(&cfg); + } active_set.rotate( &mut rng, CRDS_GOSSIP_PUSH_ACTIVE_SET_SIZE, cluster_size, &nodes, stakes, + &self_keypair.pubkey(), ) } } @@ -447,6 +481,7 @@ mod tests { &ping_cache, &mut Vec::new(), // pings &SocketAddrSpace::Unspecified, + None, ); let new_msg = CrdsValue::new_unsigned(CrdsData::from(ContactInfo::new_localhost( @@ -514,6 +549,7 @@ mod tests { &ping_cache, &mut Vec::new(), &SocketAddrSpace::Unspecified, + None, ); // push 3's contact info to 1 and 2 and 3 @@ -557,6 +593,7 @@ mod tests { &ping_cache, &mut Vec::new(), // pings &SocketAddrSpace::Unspecified, + None, ); let new_msg = CrdsValue::new_unsigned(CrdsData::from(ContactInfo::new_localhost( @@ -605,6 +642,7 @@ mod tests { &ping_cache, &mut Vec::new(), // pings &SocketAddrSpace::Unspecified, + None, ); let mut ci = ContactInfo::new_localhost(&solana_pubkey::new_rand(), 0); diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index fe537790ff28d3..fabfeb80ba7fc8 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -371,7 +371,7 @@ mod test { ); let values: Vec = vec![ { - let keypair = Keypair::generate(&mut rng); + let keypair = Keypair::new_from_array(rng.gen()); let lockouts: [Lockout; 4] = [ Lockout::new_with_confirmation_count(302_388_991, 11), Lockout::new_with_confirmation_count(302_388_995, 7), @@ -387,11 +387,11 @@ mod test { }; let vote = new_tower_sync_transaction( tower_sync, - Hash::new_from_array(rng.gen()), // blockhash - &keypair, // node_keypair - &Keypair::generate(&mut rng), // vote_keypair - &Keypair::generate(&mut rng), // authorized_voter_keypair - None, // switch_proof_hash + Hash::new_from_array(rng.gen()), // blockhash + 
&keypair, // node_keypair + &Keypair::new_from_array(rng.gen()), // vote_keypair + &Keypair::new_from_array(rng.gen()), // authorized_voter_keypair + None, // switch_proof_hash ); let vote = Vote::new( keypair.pubkey(), @@ -402,7 +402,7 @@ mod test { CrdsValue::new(CrdsData::Vote(5, vote), &keypair) }, { - let keypair = Keypair::generate(&mut rng); + let keypair = Keypair::new_from_array(rng.gen()); let lockouts: [Lockout; 3] = [ Lockout::new_with_confirmation_count(302_410_500, 9), Lockout::new_with_confirmation_count(302_410_505, 5), @@ -417,11 +417,11 @@ mod test { }; let vote = new_tower_sync_transaction( tower_sync, - Hash::new_from_array(rng.gen()), // blockhash - &keypair, // node_keypair - &Keypair::generate(&mut rng), // vote_keypair - &Keypair::generate(&mut rng), // authorized_voter_keypair - None, // switch_proof_hash + Hash::new_from_array(rng.gen()), // blockhash + &keypair, // node_keypair + &Keypair::new_from_array(rng.gen()), // vote_keypair + &Keypair::new_from_array(rng.gen()), // authorized_voter_keypair + None, // switch_proof_hash ); let vote = Vote::new( keypair.pubkey(), @@ -436,7 +436,7 @@ mod test { // Serialized bytes are fixed and should never change. assert_eq!( solana_sha256_hasher::hash(&bytes), - Hash::from_str("7gtcoafccWE964njbs2bA1QuVFeV34RaoY781yLx2A8N").unwrap() + Hash::from_str("BTg284TRo5S5PpbA9YZaab5rKeoLNAj7arwadvG6XVLT").unwrap() ); // serialize -> deserialize should round trip. 
assert_eq!( diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index 6abfcf6be7a589..fbbe69b20ae278 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -457,7 +457,7 @@ pub(crate) mod tests { &entries, is_last_in_slot, // chained_merkle_root - Some(Hash::new_from_array(rng.gen())), + Hash::new_from_array(rng.gen()), next_shred_index, next_code_index, // next_code_index &ReedSolomonCache::default(), @@ -1102,7 +1102,7 @@ pub(crate) mod tests { new_rand_coding_shreds(&mut rng, next_shred_index, 10, &shredder, &leader)[0].clone(); let mut data_shred_different_retransmitter_payload = data_shred.clone().into_payload(); shred::layout::set_retransmitter_signature( - &mut data_shred_different_retransmitter_payload, + &mut data_shred_different_retransmitter_payload.as_mut(), &Signature::new_unique(), ) .unwrap(); @@ -1110,7 +1110,7 @@ pub(crate) mod tests { Shred::new_from_serialized_shred(data_shred_different_retransmitter_payload).unwrap(); let mut coding_shred_different_retransmitter_payload = coding_shred.clone().into_payload(); shred::layout::set_retransmitter_signature( - &mut coding_shred_different_retransmitter_payload, + &mut coding_shred_different_retransmitter_payload.as_mut(), &Signature::new_unique(), ) .unwrap(); diff --git a/gossip/src/epoch_specs.rs b/gossip/src/epoch_specs.rs index c711cb3f387357..87c74ee98eb4d7 100644 --- a/gossip/src/epoch_specs.rs +++ b/gossip/src/epoch_specs.rs @@ -54,6 +54,12 @@ impl EpochSpecs { } } +impl Clone for EpochSpecs { + fn clone(&self) -> Self { + Self::from(self.bank_forks.clone()) + } +} + impl From>> for EpochSpecs { fn from(bank_forks: Arc>) -> Self { let (root, root_bank) = { diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 2440518cfbfb19..e354c075f52ce7 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -18,7 +18,6 @@ use { solana_runtime::bank_forks::BankForks, solana_signer::Signer, solana_streamer::{ - 
atomic_udp_socket::AtomicUdpSocket, evicting_sender::EvictingSender, socket::SocketAddrSpace, streamer::{self, StreamerReceiveStats}, @@ -26,7 +25,7 @@ use { solana_tpu_client::tpu_client::{TpuClient, TpuClientConfig}, std::{ collections::HashSet, - net::{SocketAddr, TcpListener}, + net::{SocketAddr, TcpListener, UdpSocket}, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, @@ -46,7 +45,7 @@ impl GossipService { pub fn new( cluster_info: &Arc, bank_forks: Option>>, - gossip_socket: AtomicUdpSocket, + gossip_sockets: Arc<[UdpSocket]>, gossip_validators: Option>, should_check_duplicate_instance: bool, stats_reporter_sender: Option>>, @@ -54,17 +53,19 @@ impl GossipService { ) -> Self { let (request_sender, request_receiver) = EvictingSender::new_bounded(GOSSIP_CHANNEL_CAPACITY); - let gossip_socket = Arc::new(gossip_socket); trace!( - "GossipService: id: {}, listening on: {:?}", + "GossipService: id: {}, listening on primary interface: {:?}, all available \ + interfaces: {:?}", &cluster_info.id(), - gossip_socket.local_addr().unwrap() + gossip_sockets[0].local_addr().unwrap(), + gossip_sockets, ); let socket_addr_space = *cluster_info.socket_addr_space(); let gossip_receiver_stats = Arc::new(StreamerReceiveStats::new("gossip_receiver")); let t_receiver = streamer::receiver_atomic( "solRcvrGossip".to_string(), - gossip_socket.clone(), + gossip_sockets.clone(), + cluster_info.bind_ip_addrs(), exit.clone(), request_sender, Recycler::default(), @@ -99,7 +100,8 @@ impl GossipService { ); let t_responder = streamer::responder_atomic( "Gossip", - gossip_socket.clone(), + gossip_sockets.clone(), + cluster_info.bind_ip_addrs(), response_receiver, socket_addr_space, stats_reporter_sender, @@ -143,16 +145,6 @@ impl GossipService { } } -/// Discover Validators in a cluster -#[deprecated(since = "3.0.0", note = "use `discover_validators` instead")] -pub fn discover_cluster( - entrypoint: &SocketAddr, - num_nodes: usize, - socket_addr_space: SocketAddrSpace, -) -> 
std::io::Result> { - discover_validators(entrypoint, num_nodes, 0, socket_addr_space) -} - pub fn discover_validators( entrypoint: &SocketAddr, num_nodes: usize, @@ -305,7 +297,7 @@ fn spy( .into_iter() .map(|x| x.0) .collect::>(); - tvu_peers = spy_ref.tvu_peers(|q| q.clone()); + tvu_peers = spy_ref.tvu_peers(ContactInfo::clone); let found_nodes_by_pubkey = if let Some(pubkeys) = find_nodes_by_pubkey { pubkeys @@ -372,12 +364,12 @@ pub fn make_gossip_node( if let Some(entrypoint) = entrypoint { cluster_info.set_entrypoint(ContactInfo::new_gossip_entry_point(entrypoint)); } - let gossip_socket = AtomicUdpSocket::new(gossip_socket); + let gossip_sockets = Arc::new([gossip_socket]); let cluster_info = Arc::new(cluster_info); let gossip_service = GossipService::new( &cluster_info, None, - gossip_socket, + gossip_sockets, None, should_check_duplicate_instance, None, @@ -395,16 +387,13 @@ mod tests { }; #[test] - #[ignore] // test that stage will exit when flag is set fn test_exit() { let exit = Arc::new(AtomicBool::new(false)); - let tn = Node::new_localhost(); - let cluster_info = ClusterInfo::new( - tn.info.clone(), - Arc::new(Keypair::new()), - SocketAddrSpace::Unspecified, - ); + let kp = Keypair::new(); + let tn = Node::new_localhost_with_pubkey(&kp.pubkey()); + let cluster_info = + ClusterInfo::new(tn.info.clone(), Arc::new(kp), SocketAddrSpace::Unspecified); let c = Arc::new(cluster_info); let d = GossipService::new( &c, diff --git a/gossip/src/lib.rs b/gossip/src/lib.rs index 4bc0214eaf959f..b69c08c61dfa2a 100644 --- a/gossip/src/lib.rs +++ b/gossip/src/lib.rs @@ -41,6 +41,7 @@ mod protocol; mod push_active_set; mod received_cache; pub mod restart_crds_values; +pub mod stake_weighting_config; pub mod weighted_shuffle; #[macro_use] diff --git a/gossip/src/node.rs b/gossip/src/node.rs index 967df1b8a5cc44..e2ae0382caba5a 100644 --- a/gossip/src/node.rs +++ b/gossip/src/node.rs @@ -1,6 +1,6 @@ use { crate::{ - cluster_info::{BindIpAddrs, NodeConfig, Sockets}, + 
cluster_info::{NodeConfig, Sockets}, contact_info::{ ContactInfo, Protocol::{QUIC, UDP}, @@ -8,28 +8,43 @@ use { }, solana_net_utils::{ find_available_ports_in_range, + multihomed_sockets::BindIpAddrs, sockets::{ bind_gossip_port_in_range, bind_in_range_with_config, bind_more_with_config, bind_to_with_config, bind_two_in_range_with_offset_and_config, localhost_port_range_for_tests, multi_bind_in_range_with_config, SocketConfiguration as SocketConfig, }, - PortRange, }, solana_pubkey::Pubkey, solana_quic_definitions::QUIC_PORT_OFFSET, - solana_streamer::{atomic_udp_socket::AtomicUdpSocket, quic::DEFAULT_QUIC_ENDPOINTS}, + solana_streamer::quic::DEFAULT_QUIC_ENDPOINTS, solana_time_utils::timestamp, std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, + io, + iter::once, + net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, num::NonZero, + sync::Arc, }, }; +// Socket addresses for each protocol across all interfaces +#[derive(Debug, Clone)] +pub struct MultihomingAddresses { + pub tvu: Box<[SocketAddr]>, + pub tpu_vote: Box<[SocketAddr]>, + pub tpu_quic: Box<[SocketAddr]>, + pub tpu_forwards_quic: Box<[SocketAddr]>, + pub tpu_vote_quic: Box<[SocketAddr]>, +} + #[derive(Debug)] pub struct Node { pub info: ContactInfo, pub sockets: Sockets, + pub bind_ip_addrs: Arc, + pub addresses: MultihomingAddresses, } impl Node { @@ -45,7 +60,7 @@ impl Node { let port_range = localhost_port_range_for_tests(); let bind_ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); let config = NodeConfig { - bind_ip_addrs: BindIpAddrs::new(vec![bind_ip_addr]).expect("should bind"), + bind_ip_addrs: Arc::new(BindIpAddrs::new(vec![bind_ip_addr]).expect("should bind")), gossip_port: port_range.0, port_range, advertised_ip: bind_ip_addr, @@ -66,35 +81,6 @@ impl Node { node } - #[deprecated(since = "3.0.0", note = "use new_with_external_ip")] - pub fn new_single_bind( - pubkey: &Pubkey, - gossip_addr: &SocketAddr, - port_range: PortRange, - bind_ip_addr: IpAddr, - ) -> Self { - let config = NodeConfig { - 
bind_ip_addrs: BindIpAddrs::new(vec![bind_ip_addr]).expect("should bind"), - gossip_port: gossip_addr.port(), - port_range, - advertised_ip: bind_ip_addr, - public_tpu_addr: None, - public_tpu_forwards_addr: None, - num_tvu_receive_sockets: NonZero::new(1).unwrap(), - num_tvu_retransmit_sockets: NonZero::new(1).unwrap(), - num_quic_endpoints: NonZero::new(DEFAULT_QUIC_ENDPOINTS) - .expect("Number of QUIC endpoints can not be zero"), - vortexor_receiver_addr: None, - }; - let mut node = Self::new_with_external_ip(pubkey, config); - let rpc_ports: [u16; 2] = find_available_ports_in_range(bind_ip_addr, port_range).unwrap(); - let rpc_addr = SocketAddr::new(bind_ip_addr, rpc_ports[0]); - let rpc_pubsub_addr = SocketAddr::new(bind_ip_addr, rpc_ports[1]); - node.info.set_rpc(rpc_addr).unwrap(); - node.info.set_rpc_pubsub(rpc_pubsub_addr).unwrap(); - node - } - pub fn new_with_external_ip(pubkey: &Pubkey, config: NodeConfig) -> Node { let NodeConfig { advertised_ip, @@ -108,26 +94,45 @@ impl Node { num_quic_endpoints, vortexor_receiver_addr, } = config; - let bind_ip_addr = bind_ip_addrs.primary(); - - let gossip_addr = SocketAddr::new(advertised_ip, gossip_port); - let (gossip_port, (gossip, ip_echo)) = - bind_gossip_port_in_range(&gossip_addr, port_range, bind_ip_addr); + let bind_ip_addr = bind_ip_addrs.active(); + let mut gossip_sockets = Vec::with_capacity(bind_ip_addrs.len()); + let mut gossip_ports = Vec::with_capacity(bind_ip_addrs.len()); + let mut ip_echo_sockets = Vec::with_capacity(bind_ip_addrs.len()); + for ip in bind_ip_addrs.iter() { + let gossip_addr = SocketAddr::new(*ip, gossip_port); + let (port, (gossip, ip_echo)) = + bind_gossip_port_in_range(&gossip_addr, port_range, *ip); + gossip_sockets.push(gossip); + gossip_ports.push(port); + ip_echo_sockets.push(ip_echo); + } let socket_config = SocketConfig::default(); - let (tvu_port, tvu_sockets) = multi_bind_in_range_with_config( + let (tvu_port, mut tvu_sockets) = multi_bind_in_range_with_config( 
bind_ip_addr, port_range, socket_config, num_tvu_receive_sockets.get(), ) .expect("tvu multi_bind"); + // Multihoming RX for TVU + tvu_sockets.append( + &mut Self::bind_to_extra_ip( + &bind_ip_addrs, + tvu_port, + num_tvu_receive_sockets.get(), + socket_config, + ) + .expect("Secondary bind TVU"), + ); + let tvu_addresses = Self::get_socket_addrs(&tvu_sockets); + let (tvu_quic_port, tvu_quic) = bind_in_range_with_config(bind_ip_addr, port_range, socket_config) .expect("tvu_quic bind"); - let ((tpu_port, tpu_socket), (_tpu_port_quic, tpu_quic)) = + let ((tpu_port, tpu_socket), (tpu_port_quic, tpu_quic)) = bind_two_in_range_with_offset_and_config( bind_ip_addr, port_range, @@ -139,10 +144,17 @@ impl Node { let tpu_sockets = bind_more_with_config(tpu_socket, 32, socket_config).expect("tpu_sockets multi_bind"); - let tpu_quic = bind_more_with_config(tpu_quic, num_quic_endpoints.get(), socket_config) + let mut tpu_quic = bind_more_with_config(tpu_quic, num_quic_endpoints.get(), socket_config) .expect("tpu_quic bind"); - let ((tpu_forwards_port, tpu_forwards_socket), (_, tpu_forwards_quic)) = + // multihoming RX for TPU + tpu_quic.append( + &mut Self::bind_to_extra_ip(&bind_ip_addrs, tpu_port_quic, 32, socket_config) + .expect("Secondary bind TPU QUIC"), + ); + let tpu_quic_addresses = Self::get_socket_addrs(&tpu_quic); + + let ((tpu_forwards_port, tpu_forwards_socket), (tpu_forwards_quic_port, tpu_forwards_quic)) = bind_two_in_range_with_offset_and_config( bind_ip_addr, port_range, @@ -153,28 +165,65 @@ impl Node { .expect("tpu_forwards primary bind"); let tpu_forwards_sockets = bind_more_with_config(tpu_forwards_socket, 8, socket_config) .expect("tpu_forwards multi_bind"); - let tpu_forwards_quic = + let mut tpu_forwards_quic = bind_more_with_config(tpu_forwards_quic, num_quic_endpoints.get(), socket_config) .expect("tpu_forwards_quic multi_bind"); - let (tpu_vote_port, tpu_vote_sockets) = + tpu_forwards_quic.append( + &mut Self::bind_to_extra_ip( + &bind_ip_addrs, + 
tpu_forwards_quic_port, + num_quic_endpoints.get(), + socket_config, + ) + .expect("Secondary bind TPU forwards"), + ); + let tpu_forwards_quic_addresses = Self::get_socket_addrs(&tpu_forwards_quic); + + let (tpu_vote_port, mut tpu_vote_sockets) = multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config, 1) .expect("tpu_vote multi_bind"); + tpu_vote_sockets.extend( + Self::bind_to_extra_ip(&bind_ip_addrs, tpu_vote_port, 1, socket_config) + .expect("Secondary binds for tpu vote"), + ); + let tpu_vote_addresses = Self::get_socket_addrs(&tpu_vote_sockets); + let (tpu_vote_quic_port, tpu_vote_quic) = bind_in_range_with_config(bind_ip_addr, port_range, socket_config) .expect("tpu_vote_quic"); - let tpu_vote_quic = + let mut tpu_vote_quic = bind_more_with_config(tpu_vote_quic, num_quic_endpoints.get(), socket_config) .expect("tpu_vote_quic multi_bind"); + tpu_vote_quic.append( + &mut Self::bind_to_extra_ip( + &bind_ip_addrs, + tpu_vote_quic_port, + num_quic_endpoints.get(), + socket_config, + ) + .expect("Secondary bind TPU vote"), + ); + let tpu_vote_quic_addresses = Self::get_socket_addrs(&tpu_vote_quic); - let (_, retransmit_sockets) = multi_bind_in_range_with_config( + let (tvu_retransmit_port, mut retransmit_sockets) = multi_bind_in_range_with_config( bind_ip_addr, port_range, socket_config, num_tvu_retransmit_sockets.get(), ) - .expect("retransmit multi_bind"); + .expect("tvu retransmit multi_bind"); + // Multihoming TX for TVU + retransmit_sockets.append( + &mut Self::bind_to_extra_ip( + &bind_ip_addrs, + tvu_retransmit_port, + num_tvu_retransmit_sockets.get(), + socket_config, + ) + .expect("Secondary bind TVU retransmit"), + ); let (_, repair) = bind_in_range_with_config(bind_ip_addr, port_range, socket_config) .expect("repair bind"); @@ -188,9 +237,14 @@ impl Node { bind_in_range_with_config(bind_ip_addr, port_range, socket_config) .expect("serve_repair_quic"); - let (_, broadcast) = + let (broadcast_port, mut broadcast) = 
multi_bind_in_range_with_config(bind_ip_addr, port_range, socket_config, 4) .expect("broadcast multi_bind"); + // Multihoming TX for broadcast + broadcast.append( + &mut Self::bind_to_extra_ip(&bind_ip_addrs, broadcast_port, 4, socket_config) + .expect("Secondary bind broadcast"), + ); let (_, ancestor_hashes_requests) = bind_in_range_with_config(bind_ip_addr, port_range, socket_config) @@ -199,13 +253,37 @@ impl Node { bind_in_range_with_config(bind_ip_addr, port_range, socket_config) .expect("ancestor_hashes_requests QUIC bind should succeed"); - // These are client sockets, so the port is set to be 0 because it must be ephimeral. - let tpu_vote_forwarding_client = - bind_to_with_config(bind_ip_addr, 0, socket_config).unwrap(); - let tpu_transaction_forwarding_client = - bind_to_with_config(bind_ip_addr, 0, socket_config).unwrap(); - let quic_vote_client = bind_to_with_config(bind_ip_addr, 0, socket_config).unwrap(); - let rpc_sts_client = bind_to_with_config(bind_ip_addr, 0, socket_config).unwrap(); + let (alpenglow_port, alpenglow) = + bind_in_range_with_config(bind_ip_addr, port_range, socket_config) + .expect("Alpenglow port bind should succeed"); + // These are "client" sockets, so they could use ephemeral ports, but we + // force them into the provided port_range to simplify the operations. 
+ + // vote forwarding is only bound to primary interface for now + let (_, tpu_vote_forwarding_client) = + bind_in_range_with_config(bind_ip_addr, port_range, socket_config).unwrap(); + + let (tpu_transaction_forwarding_client_port, tpu_transaction_forwarding_clients) = + bind_in_range_with_config(bind_ip_addr, port_range, socket_config).expect( + "TPU transaction forwarding client bind on interface {bind_ip_addr} should succeed", + ); + let tpu_transaction_forwarding_clients = once(tpu_transaction_forwarding_clients) + .chain( + Self::bind_to_extra_ip( + &bind_ip_addrs, + tpu_transaction_forwarding_client_port, + 1, + socket_config, + ) + .expect("Secondary interface binds for tpu forward clients should succeed"), + ) + .collect(); + + let (_, quic_vote_client) = + bind_in_range_with_config(bind_ip_addr, port_range, socket_config).unwrap(); + + let (_, rpc_sts_client) = + bind_in_range_with_config(bind_ip_addr, port_range, socket_config).unwrap(); let mut info = ContactInfo::new( *pubkey, @@ -213,7 +291,7 @@ impl Node { 0u16, // shred_version ); - info.set_gossip((advertised_ip, gossip_port)).unwrap(); + info.set_gossip((advertised_ip, gossip_ports[0])).unwrap(); info.set_tvu(UDP, (advertised_ip, tvu_port)).unwrap(); info.set_tvu(QUIC, (advertised_ip, tvu_quic_port)).unwrap(); info.set_tpu(public_tpu_addr.unwrap_or_else(|| SocketAddr::new(advertised_ip, tpu_port))) @@ -229,6 +307,7 @@ impl Node { .unwrap(); info.set_serve_repair(UDP, (advertised_ip, serve_repair_port)) .unwrap(); + info.set_alpenglow((advertised_ip, alpenglow_port)).unwrap(); info.set_serve_repair(QUIC, (advertised_ip, serve_repair_quic_port)) .unwrap(); @@ -251,7 +330,8 @@ impl Node { info!("vortexor_receivers is {vortexor_receivers:?}"); trace!("new ContactInfo: {info:?}"); let sockets = Sockets { - gossip: AtomicUdpSocket::new(gossip), + alpenglow: Some(alpenglow), + gossip: gossip_sockets.into_iter().collect(), tvu: tvu_sockets, tvu_quic, tpu: tpu_sockets, @@ -263,7 +343,7 @@ impl Node { 
retransmit_sockets, serve_repair, serve_repair_quic, - ip_echo: Some(ip_echo), + ip_echo: ip_echo_sockets.into_iter().next(), ancestor_hashes_requests, ancestor_hashes_requests_quic, tpu_quic, @@ -271,11 +351,173 @@ impl Node { tpu_vote_quic, tpu_vote_forwarding_client, quic_vote_client, - tpu_transaction_forwarding_client, + tpu_transaction_forwarding_clients, rpc_sts_client, vortexor_receivers, }; info!("Bound all network sockets as follows: {:#?}", &sockets); - Node { info, sockets } + Node { + info, + sockets, + bind_ip_addrs, + addresses: MultihomingAddresses { + tvu: tvu_addresses, + tpu_vote: tpu_vote_addresses, + tpu_quic: tpu_quic_addresses, + tpu_forwards_quic: tpu_forwards_quic_addresses, + tpu_vote_quic: tpu_vote_quic_addresses, + }, + } + } + + /// Extract unique addresses from bound sockets + fn get_socket_addrs(sockets: &[UdpSocket]) -> Box<[SocketAddr]> { + let mut addresses = Vec::new(); + let mut seen = std::collections::HashSet::new(); + + for socket in sockets { + let addr = socket.local_addr().unwrap(); + if seen.insert(addr) { + addresses.push(addr); + } + } + addresses.into() + } + + /// Binds num sockets to each of the addresses in bind_ip_addrs except primary_ip_addr + fn bind_to_extra_ip( + bind_ip_addrs: &BindIpAddrs, + port: u16, + num: usize, + socket_config: SocketConfig, + ) -> io::Result> { + let active_ip_addr = bind_ip_addrs.active(); + let mut sockets = vec![]; + for ip_addr in bind_ip_addrs + .iter() + .cloned() + .filter(|&ip| ip != active_ip_addr) + { + let socket = bind_to_with_config(ip_addr, port, socket_config)?; + sockets.append(&mut bind_more_with_config(socket, num, socket_config)?); + } + Ok(sockets) + } +} + +#[cfg(feature = "agave-unstable-api")] +mod multihoming { + use { + crate::{ + cluster_info::ClusterInfo, + contact_info::Protocol::{QUIC, UDP}, + node::{MultihomingAddresses, Node}, + }, + solana_net_utils::multihomed_sockets::BindIpAddrs, + std::{ + net::{IpAddr, UdpSocket}, + sync::Arc, + }, + }; + + 
#[derive(Debug, Clone)] + pub struct NodeMultihoming { + pub gossip_socket: Arc<[UdpSocket]>, + pub addresses: MultihomingAddresses, + pub bind_ip_addrs: Arc, + } + + impl NodeMultihoming { + /// Error handling note for `switch_active_interface(...)` + /// + /// Both self.gossip_socket and self.addresses are guaranteed to have the same length + /// since they hold unique addresses and are bound by the length of self.bind_ip_addrs. + /// + /// `set__socket(...)` can only fail in 4 scenarios: + /// 1. port is 0 (impossible - we can't bind to port 0) + /// 2. ip is multicast (checked at startup) + /// 3. ip is unspecified (checked at startup) + /// 4. > 255 IPs (impossible - bounded by bind_ip_addrs.len()) + pub fn switch_active_interface( + &self, + interface: IpAddr, + cluster_info: &ClusterInfo, + ) -> Result<(), String> { + if self.bind_ip_addrs.active() == interface { + return Err(String::from("Specified interface already selected")); + } + // check the validity of the provided address + let interface_index = self + .bind_ip_addrs + .iter() + .position(|&e| e == interface) + .ok_or_else(|| { + let addrs: &[IpAddr] = &self.bind_ip_addrs; + format!( + "Invalid interface address provided, registered interfaces are {addrs:?}", + ) + })?; + + // update gossip socket + let gossip_addr = self.gossip_socket[interface_index] + .local_addr() + .map_err(|e| e.to_string())?; + // Set the new gossip address in contact-info + cluster_info + .set_gossip_socket(gossip_addr) + .map_err(|e| e.to_string())?; + + // update tvu ingress advertised socket + let tvu_ingress_address = self.addresses.tvu[interface_index]; + cluster_info + .set_tvu_socket(tvu_ingress_address) + .map_err(|e| e.to_string())?; + + // tpu_quic + let tpu_quic_address = self.addresses.tpu_quic[interface_index]; + cluster_info + .set_tpu(tpu_quic_address) + .map_err(|e| e.to_string())?; + + // tpu_forwards_quic + let tpu_forwards_quic_address = self.addresses.tpu_forwards_quic[interface_index]; + cluster_info + 
.set_tpu_forwards(tpu_forwards_quic_address) + .map_err(|e| e.to_string())?; + + // tpu_vote_quic + let tpu_vote_quic_address = self.addresses.tpu_vote_quic[interface_index]; + cluster_info + .set_tpu_vote(QUIC, tpu_vote_quic_address) + .map_err(|e| e.to_string())?; + + // tpu_vote (udp) + let tpu_vote_address = self.addresses.tpu_vote[interface_index]; + cluster_info + .set_tpu_vote(UDP, tpu_vote_address) + .map_err(|e| e.to_string())?; + + // Update active index for tvu broadcast, tvu retransmit, and tpu forwarding client + // This will never fail since we have checked index validity above + let _new_ip_addr = self + .bind_ip_addrs + .set_active(interface_index) + .expect("Interface index out of range"); + + Ok(()) + } + } + + impl From<&Node> for NodeMultihoming { + fn from(node: &Node) -> Self { + NodeMultihoming { + gossip_socket: node.sockets.gossip.clone(), + addresses: node.addresses.clone(), + bind_ip_addrs: node.bind_ip_addrs.clone(), + } + } } } + +#[cfg(feature = "agave-unstable-api")] +pub use multihoming::*; diff --git a/gossip/src/push_active_set.rs b/gossip/src/push_active_set.rs index b1d1c62fa59a7d..abb86499d5b244 100644 --- a/gossip/src/push_active_set.rs +++ b/gossip/src/push_active_set.rs @@ -1,5 +1,10 @@ use { - crate::weighted_shuffle::WeightedShuffle, + crate::{ + cluster_info::REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, + stake_weighting_config::{TimeConstant, WeightingConfig, WeightingConfigTyped}, + weighted_shuffle::WeightedShuffle, + }, + agave_low_pass_filter::api as lpf, indexmap::IndexMap, rand::Rng, solana_bloom::bloom::{Bloom, ConcurrentBloom}, @@ -10,12 +15,128 @@ use { const NUM_PUSH_ACTIVE_SET_ENTRIES: usize = 25; +const ALPHA_MIN: u64 = lpf::SCALE.get(); +const ALPHA_MAX: u64 = 2 * lpf::SCALE.get(); +const DEFAULT_ALPHA: u64 = ALPHA_MAX; +// Low pass filter convergence time (ms) +const DEFAULT_TC_MS: u64 = 30_000; + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum WeightingMode { + // alpha = 2.0 -> Quadratic + Static, + // 
alpha in [1.0, 2.0], smoothed over time, scaled up by 1,000,000 to avoid floating-point math + Dynamic { + alpha: u64, // current alpha (fixed-point, 1,000,000–2,000,000) + filter_k: u64, // default: 611,015 + tc_ms: u64, // IIR time-constant (ms) + }, +} + +impl From for WeightingMode { + fn from(cfg: WeightingConfigTyped) -> Self { + match cfg { + WeightingConfigTyped::Static => WeightingMode::Static, + WeightingConfigTyped::Dynamic { tc } => { + let tc_ms = match tc { + TimeConstant::Value(ms) => ms, + TimeConstant::Default => DEFAULT_TC_MS, + }; + let filter_k = lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, tc_ms); + WeightingMode::Dynamic { + alpha: DEFAULT_ALPHA, + filter_k, + tc_ms, + } + } + } + } +} + +#[inline] +fn get_weight(bucket: u64, alpha: u64) -> u64 { + debug_assert!((ALPHA_MIN..=ALPHA_MIN + lpf::SCALE.get()).contains(&alpha)); + let b = bucket + 1; + let b_squared = b.saturating_mul(b); + gossip_interpolate_weight(b, b_squared, alpha) +} + +/// Approximates `base^alpha` rounded to nearest integer using +/// integer-only linear interpolation between `base^1` and `base^2`. +/// +/// Note: This function is most accurate when `base` is small e.g. < ~25. +#[inline] +#[allow(clippy::arithmetic_side_effects)] +fn gossip_interpolate_weight(base: u64, base_squared: u64, alpha: u64) -> u64 { + let scale = lpf::SCALE.get(); + let t = alpha.saturating_sub(ALPHA_MIN); + debug_assert!(t <= scale, "interpolation t={t} > SCALE={scale}"); + // ((base * (scale - t) + base_squared * t) + scale / 2) / scale + ((base.saturating_mul(scale.saturating_sub(t))).saturating_add(base_squared.saturating_mul(t))) + .saturating_add(scale / 2) + / scale +} + // Each entry corresponds to a stake bucket for // min stake of { this node, crds value owner } // The entry represents set of gossip nodes to actively // push to for crds values belonging to the bucket. 
-#[derive(Default)] -pub(crate) struct PushActiveSet([PushActiveSetEntry; NUM_PUSH_ACTIVE_SET_ENTRIES]); +pub(crate) struct PushActiveSet { + entries: [PushActiveSetEntry; NUM_PUSH_ACTIVE_SET_ENTRIES], + mode: WeightingMode, +} + +impl PushActiveSet { + pub(crate) fn new(mode: WeightingMode) -> Self { + Self { + entries: Default::default(), + mode, + } + } + + pub(crate) fn new_static() -> Self { + Self::new(WeightingMode::Static) + } + + pub(crate) fn apply_cfg(&mut self, cfg: &WeightingConfig) { + let config_type = WeightingConfigTyped::from(cfg); + match (&mut self.mode, config_type) { + (WeightingMode::Static, WeightingConfigTyped::Static) => (), + (current_mode, WeightingConfigTyped::Static) => { + // Dynamic -> Static: Switch mode + info!("Switching mode: {current_mode:?} -> Static"); + self.mode = WeightingMode::Static; + } + ( + WeightingMode::Dynamic { + filter_k, tc_ms, .. + }, + WeightingConfigTyped::Dynamic { tc }, + ) => { + // Dynamic -> Dynamic: Update parameters if needed + let new_tc_ms = match tc { + TimeConstant::Value(ms) => ms, + TimeConstant::Default => DEFAULT_TC_MS, + }; + if *tc_ms != new_tc_ms { + *filter_k = lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, new_tc_ms); + *tc_ms = new_tc_ms; + info!("Recomputed filter K = {} (tc_ms = {})", *filter_k, *tc_ms); + } + } + (current_mode, WeightingConfigTyped::Dynamic { .. }) => { + info!("Switching mode: {current_mode:?} -> Dynamic"); + self.mode = WeightingMode::from(config_type); + if let WeightingMode::Dynamic { + filter_k, tc_ms, .. + } = self.mode + { + info!("Initialized filter K = {filter_k} (tc_ms = {tc_ms})"); + } + } + } + } +} // Keys are gossip nodes to push messages to. // Values are which origins the node has pruned. @@ -68,7 +189,11 @@ impl PushActiveSet { // Gossip nodes to be sampled for each push active set. 
nodes: &[Pubkey], stakes: &HashMap, + self_pubkey: &Pubkey, ) { + if nodes.is_empty() { + return; + } let num_bloom_filter_items = cluster_size.max(Self::MIN_NUM_BLOOM_ITEMS); // Active set of nodes to push to are sampled from these gossip nodes, // using sampling probabilities obtained from the stake bucket of each @@ -77,32 +202,76 @@ impl PushActiveSet { .iter() .map(|node| get_stake_bucket(stakes.get(node))) .collect(); - // (k, entry) represents push active set where the stake bucket of - // min stake of {this node, crds value owner} - // is equal to `k`. The `entry` maintains set of gossip nodes to - // actively push to for crds values belonging to this bucket. - for (k, entry) in self.0.iter_mut().enumerate() { - let weights: Vec = buckets - .iter() - .map(|&bucket| { - // bucket <- get_stake_bucket(min stake of { - // this node, crds value owner and gossip peer - // }) - // weight <- (bucket + 1)^2 - // min stake of {...} is a proxy for how much we care about - // the link, and tries to mirror similar logic on the - // receiving end when pruning incoming links: - // https://github.com/solana-labs/solana/blob/81394cf92/gossip/src/received_cache.rs#L100-L105 - let bucket = bucket.min(k) as u64; - bucket.saturating_add(1).saturating_pow(2) - }) - .collect(); - entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights); + + match self.mode { + WeightingMode::Static => { + // alpha = 2.0 → weight = (bucket + 1)^2 + // (k, entry) represents push active set where the stake bucket of + // min stake of {this node, crds value owner} + // is equal to `k`. The `entry` maintains set of gossip nodes to + // actively push to for crds values belonging to this bucket. 
+ for (k, entry) in self.entries.iter_mut().enumerate() { + let weights: Vec = buckets + .iter() + .map(|&bucket| { + // bucket <- get_stake_bucket(min stake of { + // this node, crds value owner and gossip peer + // }) + // weight <- (bucket + 1)^2 + // min stake of {...} is a proxy for how much we care about + // the link, and tries to mirror similar logic on the + // receiving end when pruning incoming links: + // https://github.com/solana-labs/solana/blob/81394cf92/gossip/src/received_cache.rs#L100-L105 + let bucket = bucket.min(k) as u64; + bucket.saturating_add(1).saturating_pow(2) + }) + .collect(); + entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights); + } + } + WeightingMode::Dynamic { + ref mut alpha, + filter_k, + tc_ms: _, + } => { + // Need to take into account this node's stake bucket when calculating fraction of unstaked nodes. + let self_bucket = get_stake_bucket(stakes.get(self_pubkey)); + let num_unstaked = buckets + .iter() + .filter(|&&b| b == 0) + .count() + .saturating_add(if self_bucket == 0 { 1 } else { 0 }); + let total_nodes = nodes.len().saturating_add(1); + + let f_scaled = ((num_unstaked.saturating_mul(lpf::SCALE.get() as usize)) + .saturating_add(total_nodes / 2)) + / total_nodes; + let alpha_target = ALPHA_MIN.saturating_add(f_scaled as u64); + *alpha = lpf::filter_alpha( + *alpha, + alpha_target, + lpf::FilterConfig { + output_range: ALPHA_MIN..ALPHA_MAX, + k: filter_k, + }, + ); + + for (k, entry) in self.entries.iter_mut().enumerate() { + let weights: Vec = buckets + .iter() + .map(|&bucket| { + let bucket = bucket.min(k) as u64; + get_weight(bucket, *alpha) + }) + .collect(); + entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights); + } + } } } fn get_entry(&self, stake: Option<&u64>) -> &PushActiveSetEntry { - &self.0[get_stake_bucket(stake)] + &self.entries[get_stake_bucket(stake)] } } @@ -184,10 +353,42 @@ fn get_stake_bucket(stake: Option<&u64>) -> usize { #[cfg(test)] mod tests { use { - super::*, 
itertools::iproduct, rand::SeedableRng, rand_chacha::ChaChaRng, + super::*, + crate::stake_weighting_config::{WEIGHTING_MODE_DYNAMIC, WEIGHTING_MODE_STATIC}, + itertools::iproduct, + rand::SeedableRng, + rand_chacha::ChaChaRng, std::iter::repeat_with, }; + const MAX_STAKE: u64 = (1 << 20) * LAMPORTS_PER_SOL; + + fn push_active_set_new_dynamic() -> PushActiveSet { + PushActiveSet::new(WeightingMode::from(WeightingConfigTyped::Dynamic { + tc: TimeConstant::Default, + })) + } + + // Helper to generate a stake map given unstaked count + fn make_stakes( + nodes: &[Pubkey], + num_unstaked: usize, + rng: &mut ChaChaRng, + ) -> HashMap { + nodes + .iter() + .enumerate() + .map(|(i, node)| { + let stake = if i < num_unstaked { + 0 + } else { + rng.gen_range(1..=MAX_STAKE) + }; + (*node, stake) + }) + .collect() + } + #[test] fn test_get_stake_bucket() { assert_eq!(get_stake_bucket(None), 0); @@ -212,21 +413,20 @@ mod tests { } #[test] - fn test_push_active_set() { + fn test_push_active_set_static_weighting() { const CLUSTER_SIZE: usize = 117; - const MAX_STAKE: u64 = (1 << 20) * LAMPORTS_PER_SOL; let mut rng = ChaChaRng::from_seed([189u8; 32]); let pubkey = Pubkey::new_unique(); let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); let stakes = repeat_with(|| rng.gen_range(1..MAX_STAKE)); let mut stakes: HashMap<_, _> = nodes.iter().copied().zip(stakes).collect(); stakes.insert(pubkey, rng.gen_range(1..MAX_STAKE)); - let mut active_set = PushActiveSet::default(); - assert!(active_set.0.iter().all(|entry| entry.0.is_empty())); - active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes); - assert!(active_set.0.iter().all(|entry| entry.0.len() == 5)); + let mut active_set = PushActiveSet::new(WeightingMode::Static); + assert!(active_set.entries.iter().all(|entry| entry.0.is_empty())); + active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &pubkey); + assert!(active_set.entries.iter().all(|entry| entry.0.len() == 5)); // Assert that for all 
entries, each filter already prunes the key. - for entry in &active_set.0 { + for entry in &active_set.entries { for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } @@ -248,8 +448,8 @@ mod tests { assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k]))); - active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes); - assert!(active_set.0.iter().all(|entry| entry.0.len() == 7)); + active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes, &pubkey); + assert!(active_set.entries.iter().all(|entry| entry.0.len() == 7)); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([18, 0, 7, 15, 11].into_iter().map(|k| &nodes[k]))); @@ -268,6 +468,63 @@ mod tests { .eq([16, 7, 11].into_iter().map(|k| &nodes[k]))); } + #[test] + fn test_push_active_set_dynamic_weighting() { + const CLUSTER_SIZE: usize = 117; + let mut rng = ChaChaRng::from_seed([14u8; 32]); + let pubkey = Pubkey::new_unique(); + let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); + let stakes = repeat_with(|| rng.gen_range(1..MAX_STAKE)); + let mut stakes: HashMap<_, _> = nodes.iter().copied().zip(stakes).collect(); + stakes.insert(pubkey, rng.gen_range(1..MAX_STAKE)); + let mut active_set = push_active_set_new_dynamic(); + assert!(active_set.entries.iter().all(|entry| entry.0.is_empty())); + active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &pubkey); + assert!(active_set.entries.iter().all(|entry| entry.0.len() == 5)); + // Assert that for all entries, each filter already prunes the key. 
+ for entry in &active_set.entries { + for (node, filter) in entry.0.iter() { + assert!(filter.contains(node)); + } + } + let other = &nodes[6]; + let origin = &nodes[17]; + assert!(active_set + .get_nodes(&pubkey, origin, |_| false, &stakes) + .eq([7, 6, 2, 4, 12].into_iter().map(|k| &nodes[k]))); + assert!(active_set + .get_nodes(&pubkey, other, |_| false, &stakes) + .eq([7, 2, 4, 12].into_iter().map(|k| &nodes[k]))); + + active_set.prune(&pubkey, &nodes[6], &[*origin], &stakes); + active_set.prune(&pubkey, &nodes[11], &[*origin], &stakes); + active_set.prune(&pubkey, &nodes[4], &[*origin], &stakes); + assert!(active_set + .get_nodes(&pubkey, origin, |_| false, &stakes) + .eq([7, 2, 12].into_iter().map(|k| &nodes[k]))); + assert!(active_set + .get_nodes(&pubkey, other, |_| false, &stakes) + .eq([7, 2, 4, 12].into_iter().map(|k| &nodes[k]))); + active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes, &pubkey); + assert!(active_set.entries.iter().all(|entry| entry.0.len() == 7)); + assert!(active_set + .get_nodes(&pubkey, origin, |_| false, &stakes) + .eq([2, 12, 15, 14, 16].into_iter().map(|k| &nodes[k]))); + assert!(active_set + .get_nodes(&pubkey, other, |_| false, &stakes) + .eq([2, 4, 12, 15, 14, 16].into_iter().map(|k| &nodes[k]))); + let origins = [*origin, *other]; + active_set.prune(&pubkey, &nodes[2], &origins, &stakes); + active_set.prune(&pubkey, &nodes[12], &origins, &stakes); + active_set.prune(&pubkey, &nodes[14], &origins, &stakes); + assert!(active_set + .get_nodes(&pubkey, origin, |_| false, &stakes) + .eq([15, 16].into_iter().map(|k| &nodes[k]))); + assert!(active_set + .get_nodes(&pubkey, other, |_| false, &stakes) + .eq([4, 15, 16].into_iter().map(|k| &nodes[k]))); + } + #[test] fn test_push_active_set_entry() { const NUM_BLOOM_FILTER_ITEMS: usize = 100; @@ -329,4 +586,476 @@ mod tests { let keys = [&nodes[5], &nodes[7], &nodes[1], &nodes[13]]; assert!(entry.0.keys().eq(keys)); } + + fn alpha_of(pas: &PushActiveSet) -> u64 { + match 
pas.mode { + WeightingMode::Dynamic { alpha, .. } => alpha, + WeightingMode::Static => panic!("test assumed Dynamic mode but found Static"), + } + } + + #[test] + fn test_alpha_converges_to_expected_target() { + const CLUSTER_SIZE: usize = 415; + const TOLERANCE_MILLI: u64 = lpf::SCALE.get() / 100; // ±1% of alpha + + let mut rng = ChaChaRng::from_seed([77u8; 32]); + let mut nodes: Vec = repeat_with(Pubkey::new_unique).take(CLUSTER_SIZE).collect(); + + // 39% unstaked → alpha_target = 1,000,000 + 39 * 10000 = 1,390,000 + let percent_unstaked = 39; + let num_unstaked = (CLUSTER_SIZE * percent_unstaked + 50) / 100; + let expected_alpha_milli = 1_000_000 + (percent_unstaked as u64 * 10_000); + + let stakes = make_stakes(&nodes, num_unstaked, &mut rng); + let my_pubkey = nodes.pop().unwrap(); + + let mut active_set = push_active_set_new_dynamic(); + + // Simulate repeated calls to `rotate()` (as would happen every 7.5s) + // 8 calls (60s) should be enough to converge to the expected target alpha. + // We converge in about 4 calls (30s). 
+ for _ in 0..8 { + active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey); + } + + let actual_alpha = alpha_of(&active_set); + assert!( + (actual_alpha as i32 - expected_alpha_milli as i32).abs() <= TOLERANCE_MILLI as i32, + "alpha={actual_alpha} did not converge to expected alpha={expected_alpha_milli}" + ); + + // 93% unstaked → alpha_target = 1,000,000 + 93 * 10000 = 1,930,000 + let percent_unstaked = 93; + let num_unstaked = (CLUSTER_SIZE * percent_unstaked + 50) / 100; + let expected_alpha_milli = 1_000_000 + (percent_unstaked as u64 * 10_000); + + let stakes = make_stakes(&nodes, num_unstaked, &mut rng); + for _ in 0..8 { + active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey); + } + + let actual_alpha = alpha_of(&active_set); + assert!( + (actual_alpha as i32 - expected_alpha_milli as i32).abs() <= TOLERANCE_MILLI as i32, + "alpha={actual_alpha} did not reconverge to expected alpha={expected_alpha_milli}" + ); + } + + #[test] + fn test_alpha_converges_up_and_down() { + const CLUSTER_SIZE: usize = 415; + const TOLERANCE_MILLI: u64 = lpf::SCALE.get() / 100; // ±1% of alpha + const ROTATE_CALLS: usize = 8; + + let mut rng = ChaChaRng::from_seed([99u8; 32]); + let mut nodes: Vec = repeat_with(Pubkey::new_unique).take(CLUSTER_SIZE).collect(); + + let mut active_set = push_active_set_new_dynamic(); + + // 0% unstaked → alpha_target = 1,000,000 + let num_unstaked = 0; + let expected_alpha_0 = 1_000_000; + let stakes = make_stakes(&nodes, num_unstaked, &mut rng); + let my_pubkey = nodes.pop().unwrap(); + + for _ in 0..ROTATE_CALLS { + active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey); + } + let alpha = alpha_of(&active_set); + assert!( + (alpha as i32 - expected_alpha_0).abs() <= TOLERANCE_MILLI as i32, + "alpha={alpha} did not converge to alpha_0={expected_alpha_0}" + ); + + // 100% unstaked → alpha_target = 2,000,000 + let num_unstaked = CLUSTER_SIZE; + let expected_alpha_100 = 2_000_000; + let stakes 
= make_stakes(&nodes, num_unstaked, &mut rng); + for _ in 0..ROTATE_CALLS { + active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey); + } + let alpha = alpha_of(&active_set); + assert!( + (alpha as i32 - expected_alpha_100).abs() <= TOLERANCE_MILLI as i32, + "alpha={alpha} did not converge to alpha_100={expected_alpha_100}" + ); + + // back to 0% unstaked → alpha_target = 1,000,000 + let num_unstaked = 0; + let stakes = make_stakes(&nodes, num_unstaked, &mut rng); + for _ in 0..ROTATE_CALLS { + active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes, &my_pubkey); + } + let alpha = alpha_of(&active_set); + assert!( + (alpha as i32 - expected_alpha_0).abs() <= TOLERANCE_MILLI as i32, + "alpha={alpha} did not reconverge to alpha_0={expected_alpha_0}" + ); + } + + #[test] + fn test_alpha_progression_matches_expected() { + let mut alpha = ALPHA_MAX; + let target_down = ALPHA_MIN; + let target_up = ALPHA_MAX; + let filter_k = lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, DEFAULT_TC_MS); + + // Expected values from rotating to 1,000,000 from 2,000,000 + let expected_down = [ + 1_388_985, 1_151_309, 1_058_856, 1_022_894, 1_008_905, 1_003_463, 1_001_347, 1_000_523, + ]; + + for (i, expected) in expected_down.iter().enumerate() { + alpha = lpf::filter_alpha( + alpha, + target_down, + lpf::FilterConfig { + output_range: ALPHA_MIN..ALPHA_MAX, + k: filter_k, + }, + ); + assert_eq!( + alpha, *expected as u64, + "step {i}: alpha did not match expected during convergence down" + ); + } + + // Rotate upward from current alpha (1,000,000) to 2,000,000 + let expected_up = [ + 1_611_218, 1_848_769, 1_941_173, 1_977_117, 1_991_098, 1_996_537, 1_998_652, 1_999_475, + ]; + for (i, expected) in expected_up.iter().enumerate() { + alpha = lpf::filter_alpha( + alpha, + target_up, + lpf::FilterConfig { + output_range: ALPHA_MIN..ALPHA_MAX, + k: filter_k, + }, + ); + assert_eq!( + alpha, *expected as u64, + "step {i}: alpha did not match expected during 
convergence up" + ); + } + + // Rotate downward again from current alpha (1,999,000) to 1,000,000 + let expected_down2 = [ + 1_388_780, 1_151_229, 1_058_825, 1_022_882, 1_008_900, 1_003_461, 1_001_346, 1_000_523, + ]; + for (i, expected) in expected_down2.iter().enumerate() { + alpha = lpf::filter_alpha( + alpha, + target_down, + lpf::FilterConfig { + output_range: ALPHA_MIN..ALPHA_MAX, + k: filter_k, + }, + ); + assert_eq!( + alpha, *expected as u64, + "step {i}: alpha did not match expected during final convergence down" + ); + } + } + + #[test] + fn test_record_size() { + assert_eq!( + bincode::serialized_size(&WeightingConfig::default()).unwrap(), + 58 + ); + } + + #[test] + fn test_apply_cfg_static_to_static() { + // Static -> Static: No change + let mut active_set = PushActiveSet::new(WeightingMode::Static); + assert_eq!(active_set.mode, WeightingMode::Static); + + active_set.apply_cfg(&WeightingConfig::new_for_test(WEIGHTING_MODE_STATIC, 0)); + assert_eq!(active_set.mode, WeightingMode::Static); + } + + #[test] + fn test_apply_cfg_dynamic_to_static() { + // Dynamic -> Static: Mode switch + let mut active_set = push_active_set_new_dynamic(); + assert!(matches!(active_set.mode, WeightingMode::Dynamic { .. 
})); + + active_set.apply_cfg(&WeightingConfig::new_for_test(WEIGHTING_MODE_STATIC, 0)); + assert_eq!(active_set.mode, WeightingMode::Static); + } + + #[test] + fn test_apply_cfg_static_to_dynamic() { + // Static -> Dynamic: Mode switch + let mut active_set = PushActiveSet::new(WeightingMode::Static); + assert_eq!(active_set.mode, WeightingMode::Static); + + let config = WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, 0); + active_set.apply_cfg(&config); + + match active_set.mode { + WeightingMode::Dynamic { + alpha, + filter_k, + tc_ms, + } => { + assert_eq!(alpha, DEFAULT_ALPHA); + assert_eq!(tc_ms, DEFAULT_TC_MS); + assert_eq!( + filter_k, + lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, DEFAULT_TC_MS) + ); + } + WeightingMode::Static => panic!("Expected Dynamic mode after config change"), + } + } + + #[test] + fn test_apply_cfg_dynamic_to_dynamic_same_tc() { + // Dynamic -> Dynamic (same tc): No change + let mut active_set = push_active_set_new_dynamic(); + let original_mode = active_set.mode; + + let config = WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, 0); + active_set.apply_cfg(&config); + + // Mode should be unchanged since tc is the same + assert_eq!(active_set.mode, original_mode); + } + + #[test] + fn test_apply_cfg_dynamic_to_dynamic_different_tc() { + // Dynamic -> Dynamic (different tc): Update filter parameters + let mut active_set = push_active_set_new_dynamic(); + + // Change to a different tc value + let new_tc_ms = 45_000; + let config = WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, new_tc_ms); + active_set.apply_cfg(&config); + + match active_set.mode { + WeightingMode::Dynamic { + alpha, + filter_k, + tc_ms, + } => { + assert_eq!(alpha, DEFAULT_ALPHA); + assert_eq!(tc_ms, new_tc_ms); + assert_eq!( + filter_k, + lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, new_tc_ms) + ); + } + WeightingMode::Static => panic!("Expected Dynamic mode"), + } + } + + #[test] + fn test_apply_cfg_multiple_transitions() { + // Test 
multiple config changes in sequence + let mut active_set = PushActiveSet::new(WeightingMode::Static); + + // Static -> Dynamic + active_set.apply_cfg(&WeightingConfig::new_for_test( + WEIGHTING_MODE_DYNAMIC, + 20_000, + )); + assert!(matches!( + active_set.mode, + WeightingMode::Dynamic { tc_ms: 20_000, .. } + )); + + // Dynamic -> Dynamic (change tc) + active_set.apply_cfg(&WeightingConfig::new_for_test( + WEIGHTING_MODE_DYNAMIC, + 40_000, + )); + assert!(matches!( + active_set.mode, + WeightingMode::Dynamic { tc_ms: 40_000, .. } + )); + + // Dynamic -> Static + active_set.apply_cfg(&WeightingConfig::new_for_test(WEIGHTING_MODE_STATIC, 0)); + assert_eq!(active_set.mode, WeightingMode::Static); + + // Static -> Dynamic (with default tc) + active_set.apply_cfg(&WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, 0)); + assert!( + matches!(active_set.mode, WeightingMode::Dynamic { tc_ms, .. } if tc_ms == DEFAULT_TC_MS) + ); + } + + #[test] + fn test_apply_cfg_filter_k_computation() { + // Verify that filter_k is correctly computed for different tc values + let mut active_set = PushActiveSet::new(WeightingMode::Static); + + let test_cases = [10_000, 30_000, 60_000, 120_000]; + + for tc_ms in test_cases { + let config = WeightingConfig::new_for_test(WEIGHTING_MODE_DYNAMIC, tc_ms); + active_set.apply_cfg(&config); + + let expected_filter_k = lpf::compute_k(REFRESH_PUSH_ACTIVE_SET_INTERVAL_MS, tc_ms); + + match active_set.mode { + WeightingMode::Dynamic { + filter_k, + tc_ms: actual_tc_ms, + .. 
+ } => { + assert_eq!(actual_tc_ms, tc_ms); + assert_eq!(filter_k, expected_filter_k); + } + WeightingMode::Static => panic!("Expected Dynamic mode"), + } + } + } + + #[test] + fn test_interpolate_t_zero() { + // When alpha = ALPHA_MIN (t = 0), should return base + assert_eq!(gossip_interpolate_weight(100, 100 * 100, ALPHA_MIN), 100); + assert_eq!(gossip_interpolate_weight(0, 0, ALPHA_MIN), 0); + assert_eq!( + gossip_interpolate_weight(1_000_000, 1_000_000 * 1_000_000, ALPHA_MIN), + 1_000_000 + ); + } + + #[test] + fn test_interpolate_t_max() { + // When alpha = ALPHA_MAX (t = SCALE), should return base^2 + let base = 100; + let result = gossip_interpolate_weight(base, base * base, ALPHA_MAX); + assert_eq!(result, base * base); + + let base2 = 1000; + let result = gossip_interpolate_weight(base2, base2 * base2, ALPHA_MAX); + assert_eq!(result, base2 * base2); + } + + #[test] + fn test_interpolate_values() { + let t_10 = lpf::SCALE.get() / 10; // 10% + let t_50 = lpf::SCALE.get() / 2; // 50% + let t_75 = lpf::SCALE.get() * 3 / 4; // 75% + + let base = 3; + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_10); + assert_eq!(result, 4); + + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_50); + assert_eq!(result, 6); + + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_75); + assert_eq!(result, 8); + + let base = 15; + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_10); + assert_eq!(result, 36); + + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_50); + assert_eq!(result, 120); + + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_75); + assert_eq!(result, 173); + + let base = 24; + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_10); + assert_eq!(result, 79); + + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_50); + assert_eq!(result, 300); + + let result = 
gossip_interpolate_weight(base, base * base, ALPHA_MIN + t_75); + assert_eq!(result, 438); + } + + #[test] + fn test_interpolate_large_base() { + let base = 1_000_000_000u64; + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + lpf::SCALE.get() / 2); + assert!(result >= base); + assert!(result < base * base); + } + + #[test] + fn test_interpolate_edge_cases() { + // Test with base = 1 + assert_eq!(gossip_interpolate_weight(1, 1, ALPHA_MIN), 1); + assert_eq!(gossip_interpolate_weight(1, 1, ALPHA_MAX), 1); + assert_eq!( + gossip_interpolate_weight(1, 1, ALPHA_MIN + (lpf::SCALE.get() / 2)), + 1 + ); + + // Test with base = 0 + assert_eq!(gossip_interpolate_weight(0, 0, ALPHA_MIN), 0); + assert_eq!(gossip_interpolate_weight(0, 0, ALPHA_MAX), 0); + assert_eq!( + gossip_interpolate_weight(0, 0, ALPHA_MIN + (lpf::SCALE.get() / 2)), + 0 + ); + } + + #[test] + fn test_interpolate_rounding() { + let base = 3; + let t = lpf::SCALE.get() / 3; + let result = gossip_interpolate_weight(base, base * base, ALPHA_MIN + t); + + assert!(result >= 3); + assert!(result <= 9); + } + + #[test] + fn test_integration_filter_and_interpolate() { + // Test using filtered alpha with interpolate + // Alpha range is [SCALE, 2*SCALE] as used in push_active_set + let alpha_min = lpf::SCALE.get(); + let alpha_max = 2 * lpf::SCALE.get(); + + let config = lpf::FilterConfig { + output_range: alpha_min..alpha_max, + k: lpf::SCALE.get() / 10, // 10% + }; + + let prev_alpha = alpha_min + lpf::SCALE.get() / 4; // 1.25 * SCALE + let target_alpha = alpha_min + lpf::SCALE.get() / 2; // 1.5 * SCALE + let filtered_alpha = lpf::filter_alpha(prev_alpha, target_alpha, config); + + let base = 2; + let result = gossip_interpolate_weight(base, base * base, filtered_alpha); + + assert!(result >= base); + assert!(result <= base * base); + + assert!(filtered_alpha >= alpha_min); + assert!(filtered_alpha <= alpha_max); + } + + #[test] + fn test_get_weight_specific_values() { + // Test get_weight with 
specific bucket=15 and alpha=1118676 + let bucket = 15; + let alpha = 1118676; + + // Verify alpha is in the valid range + assert!(alpha >= ALPHA_MIN); + assert!(alpha <= ALPHA_MAX); + + let result = get_weight(bucket, alpha); + + // Expected calculation: + // b = bucket + 1 = 16 + // t = alpha - ALPHA_MIN = 1118676 - 1000000 = 118676 + // interpolate(16, 118676) should return 44 + assert_eq!(result, 44); + } } diff --git a/gossip/src/stake_weighting_config.rs b/gossip/src/stake_weighting_config.rs new file mode 100644 index 00000000000000..e72d7269936029 --- /dev/null +++ b/gossip/src/stake_weighting_config.rs @@ -0,0 +1,80 @@ +use { + serde::{Deserialize, Serialize}, + solana_account::ReadableAccount, + solana_runtime::bank::Bank, +}; + +#[derive(Debug, Clone, Copy, PartialEq)] +pub(crate) enum TimeConstant { + /// IIR time-constant (ms) + Value(u64), + /// Use the default time constant. + Default, +} + +/// Actual on-chain state that controls the weighting of gossip nodes +#[derive(Serialize, Deserialize, Debug, Default)] +#[repr(C)] +pub(crate) struct WeightingConfig { + _version: u8, // This is part of Record program header + _authority: [u8; 32], // This is part of Record program header + pub weighting_mode: u8, // 0 = Static, 1 = Dynamic + pub tc_ms: u64, // IIR time constant in milliseconds + _future_use: [u8; 16], // Reserved for future use +} + +pub const WEIGHTING_MODE_STATIC: u8 = 0; +pub const WEIGHTING_MODE_DYNAMIC: u8 = 1; + +#[derive(Debug, Clone, Copy, PartialEq)] +pub(crate) enum WeightingConfigTyped { + Static, + Dynamic { tc: TimeConstant }, +} + +impl From<&WeightingConfig> for WeightingConfigTyped { + fn from(raw: &WeightingConfig) -> Self { + match raw.weighting_mode { + WEIGHTING_MODE_STATIC => WeightingConfigTyped::Static, + WEIGHTING_MODE_DYNAMIC => { + let tc = if raw.tc_ms == 0 { + TimeConstant::Default + } else { + TimeConstant::Value(raw.tc_ms) + }; + WeightingConfigTyped::Dynamic { tc } + } + _ => WeightingConfigTyped::Static, + } + } 
+} + +impl WeightingConfig { + #[cfg(test)] + pub(crate) fn new_for_test(weighting_mode: u8, tc_ms: u64) -> Self { + Self { + _version: 0, + _authority: [0; 32], + weighting_mode, + tc_ms, + _future_use: [0; 16], + } + } +} + +mod weighting_config_control_pubkey { + solana_pubkey::declare_id!("goSwVUizoqNYKEaaiTjkgdN2RgLpvsTvFt1MEVGibY9"); +} + +pub(crate) fn get_gossip_config_from_account(bank: &Bank) -> Option { + let data = bank + .accounts() + .accounts_db + .load_account_with( + &bank.ancestors, + &weighting_config_control_pubkey::id(), + true, + )? + .0; + bincode::deserialize::(data.data()).ok() +} diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs index c105fee066e15f..8ada6c706b0b21 100644 --- a/gossip/tests/crds_gossip.rs +++ b/gossip/tests/crds_gossip.rs @@ -296,6 +296,7 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver &node.ping_cache, &mut Vec::new(), // pings &SocketAddrSpace::Unspecified, + None, ); }); let mut total_bytes = bytes_tx; @@ -468,6 +469,7 @@ fn network_run_push( &node.ping_cache, &mut Vec::new(), // pings &SocketAddrSpace::Unspecified, + None, ); }); } @@ -796,6 +798,7 @@ fn test_prune_errors() { &ping_cache, &mut Vec::new(), // pings &SocketAddrSpace::Unspecified, + None, ); let now = timestamp(); let stakes = HashMap::::default(); diff --git a/install/Cargo.toml b/install/Cargo.toml index 70d8b3fa5ec94b..a22fef8b61c7f8 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -32,18 +32,17 @@ serde_derive = { workspace = true } serde_yaml = { workspace = true } serde_yaml_08 = { package = "serde_yaml", version = "0.8.26" } solana-clap-utils = { workspace = true } -solana-config-interface = "=1.0.0" -solana-config-program-client = { version = "=1.1.0", features = ["serde"] } -solana-hash = "=2.3.0" -solana-keypair = "=2.2.1" -solana-logger = "=2.3.1" -solana-message = "=2.4.0" -solana-pubkey = { version = "=2.4.0", default-features = false } +solana-config-interface = { version = 
"=2.0.0", features = ["bincode"] } +solana-hash = "=3.0.0" +solana-keypair = "=3.0.1" +solana-logger = "=3.0.0" +solana-message = "=3.0.1" +solana-pubkey = { version = "=3.0.0", default-features = false } solana-rpc-client = { workspace = true } solana-sha256-hasher = { workspace = true } -solana-signature = { version = "=2.3.0", default-features = false } -solana-signer = "=2.2.1" -solana-transaction = "=2.2.3" +solana-signature = { version = "=3.1.0", default-features = false } +solana-signer = "=3.0.0" +solana-transaction = "=3.0.1" solana-version = { workspace = true } tar = { workspace = true } tempfile = { workspace = true } diff --git a/install/src/command.rs b/install/src/command.rs index 32005da6215f8b..57f3b518395783 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -9,8 +9,10 @@ use { crossbeam_channel::unbounded, indicatif::{ProgressBar, ProgressStyle}, serde_derive::{Deserialize, Serialize}, - solana_config_interface::instruction::{self as config_instruction}, - solana_config_program_client::get_config_data, + solana_config_interface::{ + instruction::{self as config_instruction}, + state::get_config_data, + }, solana_hash::Hash, solana_keypair::{read_keypair_file, signable::Signable, Keypair}, solana_message::Message, @@ -307,7 +309,8 @@ fn check_env_path_for_bin_dir(config: &Config) { if !found { println!( - "\nPlease update your PATH environment variable to include the solana programs:\n PATH=\"{}:$PATH\"\n", + "\nPlease update your PATH environment variable to include the solana programs:\n \ + PATH=\"{}:$PATH\"\n", config.active_release_bin_dir().to_str().unwrap() ); } @@ -317,7 +320,7 @@ fn check_env_path_for_bin_dir(config: &Config) { #[cfg(windows)] pub fn string_to_winreg_bytes(s: &str) -> Vec { use std::{ffi::OsString, os::windows::ffi::OsStrExt}; - let v: Vec<_> = OsString::from(format!("{}\x00", s)).encode_wide().collect(); + let v: Vec<_> = OsString::from(format!("{s}\x00")).encode_wide().collect(); unsafe { 
std::slice::from_raw_parts(v.as_ptr() as *const u8, v.len() * 2).to_vec() } } @@ -359,7 +362,7 @@ fn get_windows_path_var() -> Result, String> { let root = RegKey::predef(HKEY_CURRENT_USER); let environment = root .open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE) - .map_err(|err| format!("Unable to open HKEY_CURRENT_USER\\Environment: {}", err))?; + .map_err(|err| format!("Unable to open HKEY_CURRENT_USER\\Environment: {err}"))?; let reg_value = environment.get_raw_value("PATH"); match reg_value { @@ -367,7 +370,10 @@ fn get_windows_path_var() -> Result, String> { if let Some(s) = string_from_winreg_value(&val) { Ok(Some(s)) } else { - println!("the registry key HKEY_CURRENT_USER\\Environment\\PATH does not contain valid Unicode. Not modifying the PATH variable"); + println!( + "the registry key HKEY_CURRENT_USER\\Environment\\PATH does not contain valid \ + Unicode. Not modifying the PATH variable" + ); Ok(None) } } @@ -393,7 +399,7 @@ fn add_to_path(new_path: &str) -> bool { }; let Some(old_path) = - get_windows_path_var().unwrap_or_else(|err| panic!("Unable to get PATH: {}", err)) + get_windows_path_var().unwrap_or_else(|err| panic!("Unable to get PATH: {err}")) else { return false; }; @@ -408,7 +414,7 @@ fn add_to_path(new_path: &str) -> bool { let root = RegKey::predef(HKEY_CURRENT_USER); let environment = root .open_subkey_with_flags("Environment", KEY_READ | KEY_WRITE) - .unwrap_or_else(|err| panic!("Unable to open HKEY_CURRENT_USER\\Environment: {}", err)); + .unwrap_or_else(|err| panic!("Unable to open HKEY_CURRENT_USER\\Environment: {err}")); let reg_value = RegValue { bytes: string_to_winreg_bytes(&new_path), @@ -417,9 +423,7 @@ fn add_to_path(new_path: &str) -> bool { environment .set_raw_value("PATH", ®_value) - .unwrap_or_else(|err| { - panic!("Unable set HKEY_CURRENT_USER\\Environment\\PATH: {}", err) - }); + .unwrap_or_else(|err| panic!("Unable set HKEY_CURRENT_USER\\Environment\\PATH: {err}")); // Tell other processes to update their 
environment unsafe { @@ -437,9 +441,14 @@ fn add_to_path(new_path: &str) -> bool { println!( "\n{}\n {}\n\n{}", - style("The HKEY_CURRENT_USER/Environment/PATH registry key has been modified to include:").bold(), + style("The HKEY_CURRENT_USER/Environment/PATH registry key has been modified to include:") + .bold(), new_path, - style("Future applications will automatically have the correct environment, but you may need to restart your current shell.").bold() + style( + "Future applications will automatically have the correct environment, but you may \ + need to restart your current shell." + ) + .bold() ); true } @@ -521,9 +530,14 @@ fn add_to_path(new_path: &str) -> bool { if modified_rcfiles { println!( "\n{}\n {}\n", - style("Close and reopen your terminal to apply the PATH changes or run the following in your existing shell:").bold().blue(), + style( + "Close and reopen your terminal to apply the PATH changes or run the following in \ + your existing shell:" + ) + .bold() + .blue(), shell_export_string - ); + ); } modified_rcfiles @@ -1003,8 +1017,9 @@ pub fn init_or_update(config_file: &str, is_init: bool, check_only: bool) -> Res == active_release_version.channel { println!( - "Install is up to date. {release_semver} is the latest compatible release" - ); + "Install is up to date. 
{release_semver} is the \ + latest compatible release" + ); return Ok(false); } } @@ -1297,8 +1312,8 @@ pub fn list(config_file: &str) -> Result<(), String> { let entries = fs::read_dir(&config.releases_dir).map_err(|err| { format!( - "Failed to read install directory, \ - double check that your configuration file is correct: {err}" + "Failed to read install directory, double check that your configuration file is \ + correct: {err}" ) })?; diff --git a/io-uring/src/lib.rs b/io-uring/src/lib.rs index ceeae47c6406b5..d6edd6dd57c410 100644 --- a/io-uring/src/lib.rs +++ b/io-uring/src/lib.rs @@ -28,7 +28,7 @@ pub fn io_uring_supported() -> bool { true } Err(e) => { - log::info!("io_uring NOT supported: {}", e); + log::info!("io_uring NOT supported: {e}"); false } }; diff --git a/keygen/Cargo.toml b/keygen/Cargo.toml index 540f857dc074ba..93eaebc74efc2f 100644 --- a/keygen/Cargo.toml +++ b/keygen/Cargo.toml @@ -17,21 +17,21 @@ name = "solana-keygen" path = "src/keygen.rs" [dependencies] -bs58 = { workspace = true } +bs58 = { workspace = true, features = ["std"] } clap = { version = "3.1.5", features = ["cargo"] } dirs-next = { workspace = true } num_cpus = { workspace = true } serde_json = { workspace = true } solana-clap-v3-utils = { workspace = true } solana-cli-config = { workspace = true } -solana-derivation-path = "=2.2.1" -solana-instruction = { version = "=2.3.0", features = ["bincode"] } -solana-keypair = "=2.2.1" -solana-message = { version = "=2.4.0", features = ["bincode"] } -solana-pubkey = { version = "=2.4.0", default-features = false } +solana-derivation-path = "=3.0.0" +solana-instruction = { version = "=3.0.0", features = ["bincode"] } +solana-keypair = "=3.0.1" +solana-message = { version = "=3.0.1", features = ["bincode"] } +solana-pubkey = { version = "=3.0.0", default-features = false } solana-remote-wallet = { workspace = true, features = ["default"] } -solana-seed-derivable = "=2.2.1" -solana-signer = "=2.2.1" +solana-seed-derivable = "=3.0.0" 
+solana-signer = "=3.0.0" solana-version = { workspace = true } tiny-bip39 = { workspace = true } diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 1450a13f44d863..31e6d99d0e6553 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -83,7 +83,7 @@ fn grind_parser(grind_type: GrindType) -> ValueParser { }; if v.matches(':').count() != required_div_count || (v.starts_with(':') || v.ends_with(':')) { - return Err(format!("Expected : between {} and COUNT", prefix_suffix)); + return Err(format!("Expected : between {prefix_suffix} and COUNT")); } // `args` is guaranteed to have length at least 1 by the previous if statement let mut args: Vec<&str> = v.split(':').collect(); @@ -248,15 +248,16 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .index(2) .value_name("KEYPAIR") .takes_value(true) - .value_parser( - SignerSourceParserBuilder::default().allow_all().build() - ) + .value_parser(SignerSourceParserBuilder::default().allow_all().build()) .help("Filepath or URL to a keypair"), - ) + ), ) .subcommand( Command::new("new") - .about("Generate new keypair file from a random seed phrase and optional BIP39 passphrase") + .about( + "Generate new keypair file from a random seed phrase and optional BIP39 \ + passphrase", + ) .disable_version_flag(true) .arg( Arg::new("outfile") @@ -272,19 +273,13 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .long("force") .help("Overwrite the output file if it exists"), ) - .arg( - Arg::new("silent") - .short('s') - .long("silent") - .help("Do not display seed phrase. Useful when piping output to other programs that prompt for user input, like gpg"), - ) - .arg( - derivation_path_arg() - ) + .arg(Arg::new("silent").short('s').long("silent").help( + "Do not display seed phrase. 
Useful when piping output to other programs that \ + prompt for user input, like gpg", + )) + .arg(derivation_path_arg()) .key_generation_common_args() - .arg(no_outfile_arg() - .conflicts_with_all(&["outfile", "silent"]) - ) + .arg(no_outfile_arg().conflicts_with_all(&["outfile", "silent"])), ) .subcommand( Command::new("grind") @@ -304,7 +299,11 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .action(ArgAction::Append) .multiple_values(true) .value_parser(grind_parser(GrindType::Starts)) - .help("Saves specified number of keypairs whos public key starts with the indicated prefix\nExample: --starts-with sol:4\nPREFIX type is Base58\nCOUNT type is u64"), + .help( + "Saves specified number of keypairs whos public key starts with the \ + indicated prefix\nExample: --starts-with sol:4\nPREFIX type is \ + Base58\nCOUNT type is u64", + ), ) .arg( Arg::new("ends_with") @@ -315,7 +314,11 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .action(ArgAction::Append) .multiple_values(true) .value_parser(grind_parser(GrindType::Ends)) - .help("Saves specified number of keypairs whos public key ends with the indicated suffix\nExample: --ends-with ana:4\nSUFFIX type is Base58\nCOUNT type is u64"), + .help( + "Saves specified number of keypairs whos public key ends with the \ + indicated suffix\nExample: --ends-with ana:4\nSUFFIX type is \ + Base58\nCOUNT type is u64", + ), ) .arg( Arg::new("starts_and_ends_with") @@ -326,7 +329,12 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .action(ArgAction::Append) .multiple_values(true) .value_parser(grind_parser(GrindType::StartsAndEnds)) - .help("Saves specified number of keypairs whos public key starts and ends with the indicated prefix and suffix\nExample: --starts-and-ends-with sol:ana:4\nPREFIX and SUFFIX type is Base58\nCOUNT type is u64"), + .help( + "Saves specified number of keypairs whos public key starts and ends \ + with the indicated prefix 
and suffix\nExample: \ + --starts-and-ends-with sol:ana:4\nPREFIX and SUFFIX type is \ + Base58\nCOUNT type is u64", + ), ) .arg( Arg::new("num_threads") @@ -337,22 +345,18 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .default_value(num_threads) .help("Specify the number of grind threads"), ) - .arg( - Arg::new("use_mnemonic") - .long("use-mnemonic") - .help("Generate using a mnemonic key phrase. Expect a significant slowdown in this mode"), - ) - .arg( - derivation_path_arg() - .requires("use_mnemonic") - ) + .arg(Arg::new("use_mnemonic").long("use-mnemonic").help( + "Generate using a mnemonic key phrase. Expect a significant slowdown in this \ + mode", + )) + .arg(derivation_path_arg().requires("use_mnemonic")) .key_generation_common_args() .arg( no_outfile_arg() - // Require a seed phrase to avoid generating a keypair - // but having no way to get the private key - .requires("use_mnemonic") - ) + // Require a seed phrase to avoid generating a keypair + // but having no way to get the private key + .requires("use_mnemonic"), + ), ) .subcommand( Command::new("pubkey") @@ -363,9 +367,7 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .index(1) .value_name("KEYPAIR") .takes_value(true) - .value_parser( - SignerSourceParserBuilder::default().allow_all().build() - ) + .value_parser(SignerSourceParserBuilder::default().allow_all().build()) .help("Filepath or URL to a keypair"), ) .arg( @@ -386,7 +388,7 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .short('f') .long("force") .help("Overwrite the output file if it exists"), - ) + ), ) .subcommand( Command::new("recover") @@ -397,7 +399,12 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .index(1) .value_name("KEYPAIR") .takes_value(true) - .value_parser(SignerSourceParserBuilder::default().allow_prompt().allow_legacy().build()) + .value_parser( + SignerSourceParserBuilder::default() + .allow_prompt() + 
.allow_legacy() + .build(), + ) .help("`prompt:` URI scheme or `ASK` keyword"), ) .arg( @@ -419,7 +426,6 @@ fn app<'a>(num_threads: &'a str, crate_version: &'a str) -> Command<'a> { .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), ), - ) } @@ -517,8 +523,14 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { let phrase: &str = mnemonic.phrase(); let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap(); println!( - "{}\npubkey: {}\n{}\nSave this seed phrase{} to recover your new keypair:\n{}\n{}", - ÷r, keypair.pubkey(), ÷r, passphrase_message, phrase, ÷r + "{}\npubkey: {}\n{}\nSave this seed phrase{} to recover your new \ + keypair:\n{}\n{}", + ÷r, + keypair.pubkey(), + ÷r, + passphrase_message, + phrase, + ÷r ); } } @@ -600,7 +612,9 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { && starts_and_ends_with_args.is_empty() { return Err( - "Error: No keypair search criteria provided (--starts-with or --ends-with or --starts-and-ends-with)".into() + "Error: No keypair search criteria provided (--starts-with or --ends-with or \ + --starts-and-ends-with)" + .into(), ); } @@ -681,15 +695,21 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { let mnemonic = Mnemonic::new(mnemonic_type, language); let seed = Seed::new(&mnemonic, &passphrase); let keypair = match derivation_path { - Some(_) => keypair_from_seed_and_derivation_path(seed.as_bytes(), derivation_path.clone()), + Some(_) => keypair_from_seed_and_derivation_path( + seed.as_bytes(), + derivation_path.clone(), + ), None => keypair_from_seed(seed.as_bytes()), - }.unwrap(); + } + .unwrap(); (keypair, mnemonic.phrase().to_string()) } else { (Keypair::new(), "".to_string()) }; // Skip keypairs that will never match the user specified prefix - if skip_len_44_pubkeys && keypair.pubkey() >= smallest_length_44_public_key::PUBKEY { + if skip_len_44_pubkeys + && keypair.pubkey() >= smallest_length_44_public_key::PUBKEY + { continue; } let mut pubkey = 
bs58::encode(keypair.pubkey()).into_string(); @@ -718,7 +738,10 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { .count .fetch_sub(1, Ordering::Relaxed); if !no_outfile { - write_keypair_file(&keypair, format!("{}.json", keypair.pubkey())) + write_keypair_file( + &keypair, + format!("{}.json", keypair.pubkey()), + ) .unwrap(); println!( "Wrote keypair to {}", @@ -726,12 +749,16 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { ); } if use_mnemonic { - let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap(); + let divider = + String::from_utf8(vec![b'='; phrase.len()]).unwrap(); println!( "{}\nFound matching key {}", - ÷r, keypair.pubkey()); + ÷r, + keypair.pubkey() + ); println!( - "\nSave this seed phrase{} to recover your new keypair:\n{}\n{}", + "\nSave this seed phrase{} to recover your new \ + keypair:\n{}\n{}", passphrase_message, phrase, ÷r ); } diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index 6dc925e9956b53..58dbabce561ceb 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "agave-ledger-tool" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/agave-ledger-tool" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -44,6 +44,7 @@ solana-bpf-loader-program = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-output = { workspace = true } solana-clock = { workspace = true } +solana-cluster-type = { workspace = true } solana-compute-budget = { workspace = true } solana-core = { workspace = true, features = ["dev-context-only-utils"] } solana-cost-model = { workspace = true } @@ -56,10 +57,9 @@ solana-hash = { workspace = true } solana-inflation = { workspace = true } solana-instruction = { workspace = true } solana-keypair = { workspace = true } -solana-ledger = { workspace = 
true, features = ["dev-context-only-utils"] } +solana-ledger = { workspace = true, features = ["dev-context-only-utils", "agave-unstable-api"] } solana-loader-v3-interface = { workspace = true } -solana-log-collector = { workspace = true } -solana-logger = "=2.3.1" +solana-logger = "=3.0.0" solana-measure = { workspace = true } solana-message = { workspace = true } solana-native-token = { workspace = true } @@ -69,7 +69,7 @@ solana-rent = { workspace = true } solana-rpc = { workspace = true, features = ["dev-context-only-utils"] } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-runtime-transaction = { workspace = true } -solana-sbpf = { workspace = true, features = ["debugger"] } +solana-sbpf = { workspace = true, features = ["debugger", "jit"] } solana-sdk-ids = { workspace = true } solana-shred-version = { workspace = true } solana-signature = { workspace = true } @@ -79,11 +79,12 @@ solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } solana-svm-callback = { workspace = true } solana-svm-feature-set = { workspace = true } +solana-svm-log-collector = { workspace = true } +solana-svm-type-overrides = { workspace = true } solana-system-interface = { workspace = true } solana-transaction = { workspace = true } solana-transaction-context = { workspace = true } solana-transaction-status = { workspace = true } -solana-type-overrides = { workspace = true } solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index ca121d88fd4aaa..060dba6557f73d 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -3,14 +3,14 @@ use { clap::{value_t, value_t_or_exit, values_t, values_t_or_exit, Arg, ArgMatches}, solana_account_decoder::{UiAccountEncoding, UiDataSliceConfig}, solana_accounts_db::{ - accounts_db::AccountsDbConfig, + accounts_db::{AccountsDbConfig, 
DEFAULT_MEMLOCK_BUDGET_SIZE}, accounts_file::StorageAccess, accounts_index::{AccountsIndexConfig, IndexLimitMb, ScanFilter}, }, solana_clap_utils::{ hidden_unless_forced, input_parsers::pubkeys_of, - input_validators::{is_parsable, is_pow2, is_within_range}, + input_validators::{is_parsable, is_pow2}, }, solana_cli_output::CliAccountNewConfig, solana_clock::Slot, @@ -21,7 +21,6 @@ use { solana_runtime::runtime_config::RuntimeConfig, std::{ collections::HashSet, - num::NonZeroUsize, path::{Path, PathBuf}, sync::Arc, }, @@ -35,8 +34,8 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { .value_name("PATHS") .takes_value(true) .help( - "Persistent accounts location. May be specified multiple times. \ - [default: /accounts]", + "Persistent accounts location. May be specified multiple times. [default: \ + /accounts]", ), Arg::with_name("accounts_index_path") .long("accounts-index-path") @@ -44,8 +43,8 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { .takes_value(true) .multiple(true) .help( - "Persistent accounts-index location. May be specified multiple times. \ - [default: /accounts_index]", + "Persistent accounts-index location. May be specified multiple times. 
[default: \ + /accounts_index]", ), Arg::with_name("accounts_index_bins") .long("accounts-index-bins") @@ -107,13 +106,6 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { .takes_value(true) .possible_values(&["mmap", "file"]) .help("Access account storages using this method"), - Arg::with_name("accounts_db_hash_threads") - .long("accounts-db-hash-threads") - .value_name("NUM_THREADS") - .takes_value(true) - .validator(|s| is_within_range(s, 1..=num_cpus::get())) - .help("Number of threads to use for background accounts hashing") - .hidden(hidden_unless_forced()), Arg::with_name("accounts_db_ancient_storage_ideal_size") .long("accounts-db-ancient-storage-ideal-size") .value_name("BYTES") @@ -157,18 +149,23 @@ pub fn snapshot_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { .help("Do not start from a local snapshot if present"), Arg::with_name("snapshots") .long("snapshots") - .alias("snapshot-archive-path") - .alias("full-snapshot-archive-path") .value_name("DIR") .takes_value(true) .global(true) .help("Use DIR for snapshot location [default: --ledger value]"), + Arg::with_name("full_snapshot_archive_path") + .long("full-snapshot-archive-path") + .alias("snapshot-archive-path") + .value_name("DIR") + .takes_value(true) + .global(true) + .help("Use DIR as full snapshot archives location [default: --snapshots value]"), Arg::with_name("incremental_snapshot_archive_path") .long("incremental-snapshot-archive-path") .value_name("DIR") .takes_value(true) .global(true) - .help("Use DIR for separate incremental snapshot location"), + .help("Use DIR as incremental snapshot archives location [default: --snapshots value]"), Arg::with_name(use_snapshot_archives_at_startup::cli::NAME) .long(use_snapshot_archives_at_startup::cli::LONG_ARG) .takes_value(true) @@ -185,7 +182,7 @@ pub fn snapshot_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { /// use this function may not support all flags. 
pub fn parse_process_options(ledger_path: &Path, arg_matches: &ArgMatches<'_>) -> ProcessOptions { let new_hard_forks = hardforks_of(arg_matches, "hard_forks"); - let accounts_db_config = Some(get_accounts_db_config(ledger_path, arg_matches)); + let accounts_db_config = get_accounts_db_config(ledger_path, arg_matches); let log_messages_bytes_limit = value_t!(arg_matches, "log_messages_bytes_limit", usize).ok(); let runtime_config = RuntimeConfig { log_messages_bytes_limit, @@ -285,10 +282,6 @@ pub fn get_accounts_db_config( }) .unwrap_or_default(); - let num_hash_threads = arg_matches - .is_present("accounts_db_hash_threads") - .then(|| value_t_or_exit!(arg_matches, "accounts_db_hash_threads", NonZeroUsize)); - AccountsDbConfig { index: Some(accounts_index_config), base_working_path: Some(ledger_tool_ledger_path), @@ -305,7 +298,7 @@ pub fn get_accounts_db_config( skip_initial_hash_calc: arg_matches.is_present("accounts_db_skip_initial_hash_calculation"), storage_access, scan_filter_for_shrinking, - num_hash_threads, + memlock_budget_size: DEFAULT_MEMLOCK_BUDGET_SIZE, ..AccountsDbConfig::default() } } diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index f7ead704781fdd..f7d638c843f2f1 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -393,10 +393,10 @@ async fn shreds( .make_merkle_shreds_from_entries( &keypair, &entries, - true, // last_in_slot - None, // chained_merkle_root - 0, // next_shred_index - 0, // next_code_index + true, // last_in_slot + Hash::default(), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ) @@ -1182,9 +1182,9 @@ impl BigTableSubCommand for App<'_, '_> { .validator(is_slot) .default_value("1000") .help( - "Number of transaction signatures to query at once. \ - Smaller: more responsive/lower throughput. \ - Larger: less responsive/higher throughput", + "Number of transaction signatures to query at once. 
Smaller: \ + more responsive/lower throughput. Larger: less \ + responsive/higher throughput", ), ) .arg( diff --git a/ledger-tool/src/blockstore.rs b/ledger-tool/src/blockstore.rs index 0892dbd371e73d..b5ac43416723a7 100644 --- a/ledger-tool/src/blockstore.rs +++ b/ledger-tool/src/blockstore.rs @@ -466,8 +466,8 @@ pub fn blockstore_subcommands<'a, 'b>(hidden: bool) -> Vec> { .help("Start slot to purge from (inclusive)"), ) .arg(Arg::with_name("end_slot").index(2).value_name("SLOT").help( - "Ending slot to stop purging (inclusive). \ - [default: the highest slot in the ledger]", + "Ending slot to stop purging (inclusive). [default: the highest slot in the \ + ledger]", )) .arg( Arg::with_name("batch_size") diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 2acc4b977223fa..cdd443d0e80762 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -35,7 +35,7 @@ use { snapshot_config::{SnapshotConfig, SnapshotUsage}, snapshot_controller::SnapshotController, snapshot_hash::StartingSnapshotHashes, - snapshot_utils::{self, clean_orphaned_account_snapshot_dirs}, + snapshot_utils::{self, clean_orphaned_account_snapshot_dirs, BANK_SNAPSHOTS_DIR}, }, solana_transaction::versioned::VersionedTransaction, solana_unified_scheduler_pool::DefaultSchedulerPool, @@ -129,26 +129,30 @@ pub fn load_and_process_ledger( process_options: ProcessOptions, transaction_status_sender: Option, ) -> Result { - let bank_snapshots_dir = if blockstore.is_primary_access() { - blockstore.ledger_path().join("snapshot") - } else { - blockstore - .ledger_path() - .join(LEDGER_TOOL_DIRECTORY) - .join("snapshot") - }; - let mut starting_slot = 0; // default start check with genesis let snapshot_config = { - let full_snapshot_archives_dir = value_t!(arg_matches, "snapshots", String) + let snapshots_dir = value_t!(arg_matches, "snapshots", String) .ok() .map(PathBuf::from) .unwrap_or_else(|| blockstore.ledger_path().to_path_buf()); + let 
bank_snapshots_dir = if blockstore.is_primary_access() { + snapshots_dir.join(BANK_SNAPSHOTS_DIR) + } else { + blockstore + .ledger_path() + .join(LEDGER_TOOL_DIRECTORY) + .join(BANK_SNAPSHOTS_DIR) + }; + let full_snapshot_archives_dir = + value_t!(arg_matches, "full_snapshot_archive_path", String) + .ok() + .map(PathBuf::from) + .unwrap_or_else(|| snapshots_dir.clone()); let incremental_snapshot_archives_dir = value_t!(arg_matches, "incremental_snapshot_archive_path", String) .ok() .map(PathBuf::from) - .unwrap_or_else(|| full_snapshot_archives_dir.clone()); + .unwrap_or_else(|| snapshots_dir.clone()); if let Some(full_snapshot_slot) = snapshot_utils::get_highest_full_snapshot_archive_slot(&full_snapshot_archives_dir) { @@ -170,7 +174,7 @@ pub fn load_and_process_ledger( usage, full_snapshot_archives_dir, incremental_snapshot_archives_dir, - bank_snapshots_dir: bank_snapshots_dir.clone(), + bank_snapshots_dir, ..SnapshotConfig::default() } }; @@ -256,11 +260,14 @@ pub fn load_and_process_ledger( ); info!("{measure_clean_account_paths}"); - snapshot_utils::purge_incomplete_bank_snapshots(&bank_snapshots_dir); + snapshot_utils::purge_incomplete_bank_snapshots(&snapshot_config.bank_snapshots_dir); info!("Cleaning contents of account snapshot paths: {account_snapshot_paths:?}"); - clean_orphaned_account_snapshot_dirs(&bank_snapshots_dir, &account_snapshot_paths) - .map_err(LoadAndProcessLedgerError::CleanOrphanedAccountSnapshotDirectories)?; + clean_orphaned_account_snapshot_dirs( + &snapshot_config.bank_snapshots_dir, + &account_snapshot_paths, + ) + .map_err(LoadAndProcessLedgerError::CleanOrphanedAccountSnapshotDirectories)?; let geyser_plugin_active = arg_matches.is_present("geyser_plugin_config"); let (accounts_update_notifier, transaction_notifier) = if geyser_plugin_active { diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index ca4178b4f8800a..4cadb5559e8c53 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -30,8 +30,9 @@ use 
{ is_within_range, }, }, - solana_cli_output::{CliAccount, OutputFormat}, + solana_cli_output::{display::build_balance_message, CliAccount, OutputFormat}, solana_clock::{Epoch, Slot}, + solana_cluster_type::ClusterType, solana_core::{ banking_simulation::{BankingSimulator, BankingTraceEvents}, system_monitor_service::{SystemMonitorService, SystemMonitorStatsReportConfig}, @@ -39,7 +40,6 @@ use { }, solana_cost_model::{cost_model::CostModel, cost_tracker::CostTracker}, solana_feature_gate_interface::{self as feature, Feature}, - solana_genesis_config::ClusterType, solana_inflation::Inflation, solana_instruction::TRANSACTION_LEVEL_STACK_HEIGHT, solana_ledger::{ @@ -51,7 +51,7 @@ use { }, solana_measure::{measure::Measure, measure_time}, solana_message::SimpleAddressLoader, - solana_native_token::{lamports_to_sol, sol_to_lamports, Sol}, + solana_native_token::{Sol, LAMPORTS_PER_SOL}, solana_pubkey::Pubkey, solana_rent::Rent, solana_runtime::{ @@ -281,7 +281,7 @@ fn graph_forks(bank_forks: &BankForks, config: &GraphConfig) -> String { format!( "\nvotes: {}, stake: {:.1} SOL ({:.1}%)", votes, - lamports_to_sol(*stake), + build_balance_message(*stake, false, false), *stake as f64 / *total_stake as f64 * 100., ) } else { @@ -377,7 +377,7 @@ fn graph_forks(bank_forks: &BankForks, config: &GraphConfig) -> String { r#" "last vote {}"[shape=box,label="Latest validator vote: {}\nstake: {} SOL\nroot slot: {}\n{}"];"#, node_pubkey, node_pubkey, - lamports_to_sol(*stake), + build_balance_message(*stake, false, false), vote_state_view.root_slot().unwrap_or(0), vote_history, )); @@ -399,7 +399,7 @@ fn graph_forks(bank_forks: &BankForks, config: &GraphConfig) -> String { dot.push(format!( r#" "..."[label="...\nvotes: {}, stake: {:.1} SOL {:.1}%"];"#, absent_votes, - lamports_to_sol(absent_stake), + build_balance_message(absent_stake, false, false), absent_stake as f64 / lowest_total_stake as f64 * 100., )); } @@ -512,6 +512,7 @@ fn minimize_bank_for_snapshot( bank: &Bank, 
snapshot_slot: Slot, ending_slot: Slot, + should_recalculate_accounts_lt_hash: bool, ) -> bool { let ((transaction_account_set, possibly_incomplete), transaction_accounts_measure) = measure_time!( blockstore.get_accounts_used_in_range(bank, snapshot_slot, ending_slot), @@ -520,7 +521,12 @@ fn minimize_bank_for_snapshot( let total_accounts_len = transaction_account_set.len(); info!("Added {total_accounts_len} accounts from transactions. {transaction_accounts_measure}"); - SnapshotMinimizer::minimize(bank, snapshot_slot, transaction_account_set); + SnapshotMinimizer::minimize( + bank, + snapshot_slot, + transaction_account_set, + should_recalculate_accounts_lt_hash, + ); possibly_incomplete } @@ -935,10 +941,10 @@ fn main() { .help("Print account data in specified format when printing account contents."); let rent = Rent::default(); - let default_bootstrap_validator_lamports = &sol_to_lamports(500.0) - .max(VoteStateV3::get_rent_exempt_reserve(&rent)) + let default_bootstrap_validator_lamports = &(500 * LAMPORTS_PER_SOL) + .max(rent.minimum_balance(VoteStateV3::size_of())) .to_string(); - let default_bootstrap_validator_stake_lamports = &sol_to_lamports(0.5) + let default_bootstrap_validator_stake_lamports = &(LAMPORTS_PER_SOL / 2) .max(rent.minimum_balance(StakeStateV2::size_of())) .to_string(); let default_graph_vote_account_mode = GraphVoteAccountMode::default(); @@ -1310,8 +1316,8 @@ fn main() { .value_name("DIR") .takes_value(true) .help( - "Output directory for the snapshot \ - [default: --snapshot-archive-path if present else --ledger directory]", + "Output directory for the snapshot [default: --snapshot-archive-path \ + if present else --ledger directory]", ), ) .arg( @@ -1463,6 +1469,21 @@ fn main() { .value_name("ENDING_SLOT") .help("Ending slot for minimized snapshot creation"), ) + .arg( + Arg::with_name("recalculate_accounts_lt_hash") + .long("recalculate-accounts-lt-hash") + .takes_value(false) + .help("Recalculate the accounts lt hash for minimized 
snapshots") + .long_help( + "Recalculate the accounts lt hash for minimized snapshots. Without \ + this flag, loading the minimized snapshot will fail startup accounts \ + verification because the accounts lt hash will not match due to the \ + pruned account state. If not recalculating the accounts lt hash, \ + pass `--accounts-db-skip-initial-hash-calculation` to `ledger-tool \ + verify` in order to bypass this check.", + ) + .requires("minimized"), + ) .arg( Arg::with_name("snapshot_archive_format") .long("snapshot-archive-format") @@ -2135,18 +2156,6 @@ fn main() { // possibility. accounts_background_service.join().unwrap(); - // Similar to waiting for ABS to stop, we also wait for the initial startup - // verification to complete. The startup verification runs in the background - // and verifies the snapshot's accounts hashes are correct. We only want a - // single accounts hash calculation to run at a time, and since snapshot - // creation below will calculate the accounts hash, we wait for the startup - // verification to complete before proceeding. 
- bank.rc - .accounts - .accounts_db - .verify_accounts_hash_in_bg - .join_background_thread(); - let child_bank_required = rent_burn_percentage.is_ok() || hashes_per_tick.is_some() || remove_stake_accounts @@ -2321,7 +2330,7 @@ fn main() { identity_pubkey, identity_pubkey, 100, - VoteStateV3::get_rent_exempt_reserve(&rent).max(1), + rent.minimum_balance(VoteStateV3::size_of()).max(1), ); bank.store_account( @@ -2410,6 +2419,7 @@ fn main() { &bank, snapshot_slot, ending_slot.unwrap(), + arg_matches.is_present("recalculate_accounts_lt_hash"), ) } else { false diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index 12679fbc7f2276..73e55154f59412 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -14,17 +14,16 @@ use { is_loadable::IsLoadable as _, }, solana_cli_output::{ - display::writeln_transaction, CliAccount, CliAccountNewConfig, OutputFormat, QuietDisplay, - VerboseDisplay, + display::{build_balance_message, writeln_transaction}, + CliAccount, CliAccountNewConfig, OutputFormat, QuietDisplay, VerboseDisplay, }, solana_clock::{Slot, UnixTimestamp}, solana_hash::Hash, solana_ledger::{ blockstore::{Blockstore, BlockstoreError}, blockstore_meta::{DuplicateSlotProof, ErasureMeta}, - shred::{self, Shred, ShredType}, + shred::{Shred, ShredType}, }, - solana_native_token::lamports_to_sol, solana_pubkey::Pubkey, solana_runtime::bank::Bank, solana_transaction::versioned::VersionedTransaction, @@ -258,14 +257,14 @@ impl fmt::Display for CliBlockWithEntries { format!( "{}◎{:<14.9}", sign, - lamports_to_sol(reward.lamports.unsigned_abs()) + build_balance_message(reward.lamports.unsigned_abs(), false, false) ), if reward.post_balance == 0 { " - -".to_string() } else { format!( "◎{:<19.9} {:>13.9}%", - lamports_to_sol(reward.post_balance), + build_balance_message(reward.post_balance, false, false), (reward.lamports.abs() as f64 / (reward.post_balance as f64 - reward.lamports as f64)) * 100.0 @@ -283,7 +282,7 @@ impl fmt::Display for 
CliBlockWithEntries { f, "Total Rewards: {}◎{:<12.9}", sign, - lamports_to_sol(total_rewards.unsigned_abs()) + build_balance_message(total_rewards.unsigned_abs(), false, false) )?; } for (index, entry) in self.encoded_confirmed_block.entries.iter().enumerate() { @@ -410,7 +409,7 @@ impl From for CliDuplicateShred { merkle_root: shred.merkle_root().ok(), chained_merkle_root: shred.chained_merkle_root().ok(), last_in_slot: shred.last_in_slot(), - payload: shred::Payload::unwrap_or_clone(shred.payload().clone()), + payload: Vec::from(shred.into_payload().bytes), } } } diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index b76379cb6f9364..5a6bf773bb9cc2 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -24,7 +24,7 @@ use { solana_pubkey::Pubkey, solana_runtime::bank::Bank, solana_sbpf::{ - assembler::assemble, elf::Executable, static_analysis::Analysis, + assembler::assemble, ebpf::MM_INPUT_START, elf::Executable, static_analysis::Analysis, verifier::RequisiteVerifier, }, solana_sdk_ids::{bpf_loader_upgradeable, sysvar}, @@ -408,7 +408,7 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { pubkey, AccountSharedData::new(0, allocation_size, &Pubkey::new_unique()), )); - instruction_accounts.push(InstructionAccount::new(0, 0, false, true)); + instruction_accounts.push(InstructionAccount::new(0, false, true)); vec![] } Err(_) => { @@ -480,7 +480,6 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { idx }; InstructionAccount::new( - txn_acct_index as IndexOfAccount, txn_acct_index as IndexOfAccount, account_info.is_signer.unwrap_or(false), account_info.is_writable.unwrap_or(false), @@ -506,6 +505,10 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { let interpreted = matches.value_of("mode").unwrap() != "jit"; with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); + let provide_instruction_data_offset_in_vm_r2 = invoke_context + .get_feature_set() + 
.provide_instruction_data_offset_in_vm_r2; + // Adding `DELAY_VISIBILITY_SLOT_OFFSET` to slots to accommodate for delay visibility of the program let mut program_cache_for_tx_batch = bank.new_program_cache_for_tx_batch_for_slot(bank.slot() + DELAY_VISIBILITY_SLOT_OFFSET); @@ -521,25 +524,24 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure( - vec![program_index, program_index.saturating_add(1)], + .configure_next_instruction_for_tests( + program_index.saturating_add(1), instruction_accounts, &instruction_data, - ); + ) + .unwrap(); invoke_context.push().unwrap(); - let (_parameter_bytes, regions, account_lengths) = serialize_parameters( - invoke_context.transaction_context, - invoke_context - .transaction_context - .get_current_instruction_context() - .unwrap(), - false, // stricter_abi_and_runtime_constraints - false, // account_data_direct_mapping - true, // for mask_out_rent_epoch_in_vm_serialization - ) - .unwrap(); + let (_parameter_bytes, regions, account_lengths, instruction_data_offset) = + serialize_parameters( + &invoke_context + .transaction_context + .get_current_instruction_context() + .unwrap(), + false, // stricter_abi_and_runtime_constraints + false, // account_data_direct_mapping + true, // for mask_out_rent_epoch_in_vm_serialization + ) + .unwrap(); let program = matches.value_of("PROGRAM").unwrap(); let verified_executable = load_program(Path::new(program), program_id, &invoke_context); @@ -556,6 +558,12 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { if matches.value_of("mode").unwrap() == "debugger" { vm.debug_port = Some(matches.value_of("port").unwrap().parse::().unwrap()); } + vm.registers[1] = MM_INPUT_START; + + // SIMD-0321: Provide offset to instruction data in VM register 2. 
+ if provide_instruction_data_offset_in_vm_r2 { + vm.registers[2] = instruction_data_offset as u64; + } let (instruction_count, result) = vm.execute_program(&verified_executable, interpreted); let duration = Instant::now() - start_time; if matches.occurrences_of("trace") > 0 { diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 2c5321a0051df5..fcb6f807e77c4a 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -23,6 +23,7 @@ frozen-abi = [ "dep:solana-frozen-abi-macro", "solana-runtime/frozen-abi", ] +agave-unstable-api = [] [dependencies] agave-feature-set = { workspace = true } @@ -31,9 +32,11 @@ anyhow = { workspace = true } assert_matches = { workspace = true } bincode = { workspace = true } bitflags = { workspace = true, features = ["serde"] } +bytes = { workspace = true } bzip2 = { workspace = true } chrono = { workspace = true, features = ["default", "serde"] } chrono-humanize = { workspace = true } +conditional-mod = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true, features = ["rayon", "raw-api"] } eager = { workspace = true } @@ -81,6 +84,7 @@ solana-message = { workspace = true } solana-metrics = { workspace = true } solana-native-token = { workspace = true } solana-net-utils = { workspace = true } +solana-nohash-hasher = { workspace = true } solana-packet = { workspace = true } solana-perf = { workspace = true } solana-program-runtime = { workspace = true, features = ["metrics"] } @@ -99,11 +103,11 @@ solana-storage-bigtable = { workspace = true } solana-storage-proto = { workspace = true } solana-streamer = { workspace = true } solana-svm = { workspace = true } +solana-svm-timings = { workspace = true } solana-svm-transaction = { workspace = true } solana-system-interface = { workspace = true } solana-system-transaction = { workspace = true } solana-time-utils = { workspace = true } -solana-timings = { workspace = true } solana-transaction = { workspace = true } solana-transaction-context = { 
workspace = true } solana-transaction-error = { workspace = true } @@ -133,7 +137,7 @@ criterion = { workspace = true } proptest = { workspace = true } solana-account-decoder = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` -solana-ledger = { path = ".", features = ["dev-context-only-utils"] } +solana-ledger = { path = ".", features = ["dev-context-only-utils", "agave-unstable-api"] } solana-logger = { workspace = true } solana-net-utils = { workspace = true, features = ["dev-context-only-utils"] } solana-perf = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/ledger/benches/blockstore.rs b/ledger/benches/blockstore.rs index 461ef19ded6300..3a8fc248edc1c6 100644 --- a/ledger/benches/blockstore.rs +++ b/ledger/benches/blockstore.rs @@ -53,7 +53,7 @@ fn setup_read_bench( ); blockstore .insert_shreds(shreds, None, false) - .expect("Expectd successful insertion of shreds into ledger"); + .expect("Expected successful insertion of shreds into ledger"); } // Write small shreds to the ledger diff --git a/ledger/benches/blockstore_processor.rs b/ledger/benches/blockstore_processor.rs index a3730648a14b9f..6706b3eafbfaea 100644 --- a/ledger/benches/blockstore_processor.rs +++ b/ledger/benches/blockstore_processor.rs @@ -21,9 +21,9 @@ use { }, solana_runtime_transaction::runtime_transaction::RuntimeTransaction, solana_signer::Signer, + solana_svm_timings::ExecuteTimings, solana_system_interface::program as system_program, solana_system_transaction as system_transaction, - solana_timings::ExecuteTimings, solana_transaction::sanitized::SanitizedTransaction, std::sync::{Arc, RwLock}, test::Bencher, diff --git a/ledger/benches/make_shreds_from_entries.rs b/ledger/benches/make_shreds_from_entries.rs index d0571b167eab74..dcfef025c5636f 100644 --- a/ledger/benches/make_shreds_from_entries.rs +++ b/ledger/benches/make_shreds_from_entries.rs @@ -53,7 +53,7 @@ fn make_shreds_from_entries( keypair: &Keypair, 
entries: &[Entry], is_last_in_slot: bool, - chained_merkle_root: Option, + chained_merkle_root: Hash, reed_solomon_cache: &ReedSolomonCache, stats: &mut ProcessShredsStats, ) -> (Vec, Vec) { @@ -89,7 +89,7 @@ fn run_make_shreds_from_entries( let keypair = Keypair::new(); let data_size = num_packets * PACKET_DATA_SIZE; let entries = make_dummy_entries(&mut rng, data_size); - let chained_merkle_root = Some(make_dummy_hash(&mut rng)); + let chained_merkle_root = make_dummy_hash(&mut rng); let reed_solomon_cache = ReedSolomonCache::default(); let mut stats = ProcessShredsStats::default(); // Initialize the thread-pool and warm the Reed-Solomon cache. @@ -143,7 +143,7 @@ fn run_recover_shreds( let keypair = Keypair::new(); let data_size = num_packets * PACKET_DATA_SIZE; let entries = make_dummy_entries(&mut rng, data_size); - let chained_merkle_root = Some(make_dummy_hash(&mut rng)); + let chained_merkle_root = make_dummy_hash(&mut rng); let reed_solomon_cache = ReedSolomonCache::default(); let mut stats = ProcessShredsStats::default(); let (data, code) = make_shreds_from_entries( diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 7cfb431134aadf..921f3cc624e062 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -194,12 +194,6 @@ pub fn load_bank_forks( exit, ) .map_err(BankForksUtilsError::ProcessBlockstoreFromGenesis)?; - bank_forks - .read() - .unwrap() - .root_bank() - .set_initial_accounts_hash_verification_completed(); - (bank_forks, None) }; @@ -276,13 +270,12 @@ fn bank_forks_from_snapshot( }; let bank = if let Some(fastboot_snapshot) = fastboot_snapshot { - let (bank, _) = snapshot_bank_utils::bank_from_snapshot_dir( + snapshot_bank_utils::bank_from_snapshot_dir( &account_paths, &fastboot_snapshot, genesis_config, &process_options.runtime_config, process_options.debug_keys.clone(), - None, process_options.limit_load_slot_count_from_snapshot, process_options.verify_index, 
process_options.accounts_db_config.clone(), @@ -292,8 +285,7 @@ fn bank_forks_from_snapshot( .map_err(|err| BankForksUtilsError::BankFromSnapshotsDirectory { source: err, path: fastboot_snapshot.snapshot_path(), - })?; - bank + })? } else { // Given that we are going to boot from an archive, the append vecs held in the snapshot dirs for fast-boot should // be released. They will be released by the account_background_service anyway. But in the case of the account_paths @@ -301,7 +293,7 @@ fn bank_forks_from_snapshot( // the archives, causing the out-of-memory problem. So, purge the snapshot dirs upfront before loading from the archive. snapshot_utils::purge_all_bank_snapshots(&snapshot_config.bank_snapshots_dir); - let (bank, _) = snapshot_bank_utils::bank_from_snapshot_archives( + snapshot_bank_utils::bank_from_snapshot_archives( &account_paths, &snapshot_config.bank_snapshots_dir, &full_snapshot_archive_info, @@ -309,7 +301,6 @@ fn bank_forks_from_snapshot( genesis_config, &process_options.runtime_config, process_options.debug_keys.clone(), - None, process_options.limit_load_slot_count_from_snapshot, process_options.accounts_db_skip_shrink, process_options.accounts_db_force_initial_clean, @@ -325,8 +316,7 @@ fn bank_forks_from_snapshot( .as_ref() .map(|archive| archive.path().display().to_string()) .unwrap_or("none".to_string()), - })?; - bank + })? 
}; // We must inform accounts-db of the latest full snapshot slot, which is used by the background diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 2e8191d3b97d22..6752e302fdf8e8 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -104,11 +104,6 @@ pub use { pub const MAX_REPLAY_WAKE_UP_SIGNALS: usize = 1; pub const MAX_COMPLETED_SLOTS_IN_CHANNEL: usize = 100_000; -// An upper bound on maximum number of data shreds we can handle in a slot -// 32K shreds would allow ~320K peak TPS -// (32K shreds per slot * 4 TX per shred * 2.5 slots per sec) -pub const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768; - pub type CompletedSlotsSender = Sender>; pub type CompletedSlotsReceiver = Receiver>; @@ -1009,13 +1004,11 @@ impl Blockstore { match shred.shred_type() { ShredType::Code => { // Don't need Arc overhead here! - debug_assert_matches!(shred.payload(), shred::Payload::Unique(_)); recovered_shreds.push(shred.into_payload()); None } ShredType::Data => { // Verify that the cloning is cheap here. 
- debug_assert_matches!(shred.payload(), shred::Payload::Shared(_)); recovered_shreds.push(shred.payload().clone()); Some(shred) } @@ -2382,7 +2375,7 @@ impl Blockstore { let mut all_shreds = vec![]; let mut slot_entries = vec![]; let reed_solomon_cache = ReedSolomonCache::default(); - let mut chained_merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let mut chained_merkle_root = Hash::new_from_array(rand::thread_rng().gen()); // Find all the entries for start_slot for entry in entries.into_iter() { if remaining_ticks_in_slot == 0 { @@ -2410,7 +2403,7 @@ impl Blockstore { ); all_shreds.append(&mut data_shreds); all_shreds.append(&mut coding_shreds); - chained_merkle_root = Some(coding_shreds.last().unwrap().merkle_root().unwrap()); + chained_merkle_root = coding_shreds.last().unwrap().merkle_root().unwrap(); shredder = Shredder::new( current_slot, parent_slot, @@ -4869,7 +4862,7 @@ pub fn create_new_ledger( &entries, true, // is_last_in_slot // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), + Hash::new_from_array(rand::thread_rng().gen()), 0, // next_shred_index 0, // next_code_index &ReedSolomonCache::default(), @@ -5101,10 +5094,9 @@ pub fn entries_to_test_shreds( &Keypair::new(), entries, is_full_slot, - // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), - 0, // next_shred_index, - 0, // next_code_index + Hash::new_from_array(rand::thread_rng().gen()), // chained_merkle_root + 0, // next_shred_index, + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ) @@ -5267,7 +5259,7 @@ fn adjust_ulimit_nofile(enforce_ulimit_nofile: bool) -> Result<()> { // usually not enough // AppendVecs and disk Account Index are also heavy users of mmapped files. // This should be kept in sync with published validator instructions. 
- // https://docs.solanalabs.com/operations/guides/validator-start#increased-memory-mapped-files-limit + // https://docs.anza.xyz/operations/guides/validator-start#system-tuning let desired_nofile = 1_000_000; fn get_nofile() -> libc::rlimit { @@ -5315,7 +5307,7 @@ pub mod tests { crate::{ genesis_utils::{create_genesis_config, GenesisConfigInfo}, leader_schedule::{FixedSchedule, IdentityKeyedLeaderSchedule}, - shred::max_ticks_per_n_shreds, + shred::{max_ticks_per_n_shreds, MAX_DATA_SHREDS_PER_SLOT}, }, assert_matches::assert_matches, bincode::{serialize, Options}, @@ -5343,7 +5335,6 @@ pub mod tests { InnerInstruction, InnerInstructions, Reward, Rewards, TransactionTokenBalance, }, std::{cmp::Ordering, time::Duration}, - test_case::test_case, }; // used for tests only @@ -6885,7 +6876,7 @@ pub mod tests { &keypair, &[], false, - None, + Hash::default(), // merkle_root (i * gap) as u32, (i * gap) as u32, &reed_solomon_cache, @@ -7068,7 +7059,7 @@ pub mod tests { &keypair, &entries, true, - None, //chained_merkle_root + Hash::default(), // merkle_root 0, 0, &rsc, @@ -7096,9 +7087,9 @@ pub mod tests { &keypair, &[], true, - None, //chained_merkle_root - 6, // next_shred_index, - 6, // next_code_index + Hash::default(), // merkle_root + 6, // next_shred_index, + 6, // next_code_index &rsc, &mut ProcessShredsStats::default(), ) @@ -7159,9 +7150,9 @@ pub mod tests { &Keypair::new(), &entries, true, - None, //chained_merkle_root - last_idx, // next_shred_index, - last_idx, // next_code_index + Hash::default(), // merkle_root + last_idx, // next_shred_index, + last_idx, // next_code_index &rsc, &mut ProcessShredsStats::default(), ) @@ -7552,7 +7543,7 @@ pub mod tests { &keypair, &[3, 3, 3], false, - Some(Hash::default()), + Hash::default(), new_index, new_index, &reed_solomon_cache, @@ -7626,7 +7617,12 @@ pub mod tests { let slot = 1; let (_data_shreds, code_shreds, _) = setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot( - slot, 0, 10, 0, None, true, + 
slot, + 0, + 10, + 0, + Hash::default(), + true, ); let coding_shred = code_shreds[0].clone(); @@ -7663,7 +7659,12 @@ pub mod tests { let slot = 1; let (_data_shreds, code_shreds, _) = setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot( - slot, 0, 10, 0, None, true, + slot, + 0, + 10, + 0, + Hash::default(), + true, ); let coding_shred = code_shreds[0].clone(); @@ -7885,7 +7886,7 @@ pub mod tests { &keypair, &[1, 1, 1], true, - Some(Hash::default()), + Hash::default(), next_shred_index as u32, next_shred_index as u32, &reed_solomon_cache, @@ -10163,7 +10164,7 @@ pub mod tests { parent_slot, num_entries, fec_set_index, - Some(Hash::new_from_array(rand::thread_rng().gen())), + Hash::new_from_array(rand::thread_rng().gen()), ) } @@ -10172,7 +10173,7 @@ pub mod tests { parent_slot: u64, num_entries: u64, fec_set_index: u32, - chained_merkle_root: Option, + chained_merkle_root: Hash, ) -> (Vec, Vec, Arc) { setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot( slot, @@ -10189,7 +10190,7 @@ pub mod tests { parent_slot: u64, num_entries: u64, fec_set_index: u32, - chained_merkle_root: Option, + chained_merkle_root: Hash, is_last_in_slot: bool, ) -> (Vec, Vec, Arc) { let entries = make_slot_entries_with_transactions(num_entries); @@ -10251,21 +10252,20 @@ pub mod tests { assert_eq!(num_coding_in_index, num_coding); } - #[test_case(false)] - #[test_case(true)] - fn test_duplicate_slot(chained: bool) { + #[test] + fn test_duplicate_slot() { let slot = 0; let entries1 = make_slot_entries_with_transactions(1); let entries2 = make_slot_entries_with_transactions(1); let leader_keypair = Arc::new(Keypair::new()); let reed_solomon_cache = ReedSolomonCache::default(); let shredder = Shredder::new(slot, 0, 0, 0).unwrap(); - let chained_merkle_root = chained.then(|| Hash::new_from_array(rand::thread_rng().gen())); + let merkle_root = Hash::new_from_array(rand::thread_rng().gen()); let (shreds, _) = shredder.entries_to_merkle_shreds_for_tests( 
&leader_keypair, &entries1, true, // is_last_in_slot - chained_merkle_root, + merkle_root, 0, // next_shred_index 0, // next_code_index, &reed_solomon_cache, @@ -10275,7 +10275,7 @@ pub mod tests { &leader_keypair, &entries2, true, // is_last_in_slot - chained_merkle_root, + merkle_root, 0, // next_shred_index 0, // next_code_index &reed_solomon_cache, @@ -10620,7 +10620,7 @@ pub mod tests { let version = version_from_hash(&entries[0].hash); let shredder = Shredder::new(slot, 0, 0, version).unwrap(); let reed_solomon_cache = ReedSolomonCache::default(); - let merkle_root = Some(Hash::new_from_array(rand::thread_rng().gen())); + let merkle_root = Hash::new_from_array(rand::thread_rng().gen()); let kp = Keypair::new(); // produce normal shreds let (data1, coding1) = shredder.entries_to_merkle_shreds_for_tests( @@ -10735,10 +10735,10 @@ pub mod tests { .make_merkle_shreds_from_entries( &leader_keypair, &entries, - true, // is_last_in_slot - Some(Hash::new_unique()), - 0, // next_shred_index - 0, // next_code_index, + true, // is_last_in_slot + Hash::new_unique(), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index, &reed_solomon_cache, &mut ProcessShredsStats::default(), ) @@ -10751,7 +10751,7 @@ pub mod tests { &leader_keypair, &entries, true, // is_last_in_slot - Some(last_data1.chained_merkle_root().unwrap()), + last_data1.chained_merkle_root().unwrap(), last_data1.index() + 1, // next_shred_index last_code1.index() + 1, // next_code_index, &reed_solomon_cache, @@ -11151,7 +11151,7 @@ pub mod tests { parent_slot, 10, next_fec_set_index, - Some(merkle_root), + merkle_root, ); let data_shred = data_shreds[0].clone(); let coding_shred = coding_shreds[0].clone(); @@ -11184,7 +11184,7 @@ pub mod tests { parent_slot, 10, next_fec_set_index, - Some(merkle_root), + merkle_root, ); let next_coding_shred = next_coding_shreds[0].clone(); @@ -11223,7 +11223,7 @@ pub mod tests { slot, 10, fec_set_index, - Some(merkle_root), + merkle_root, ); let 
next_slot_data_shred = next_slot_data_shreds[0].clone(); let next_slot_coding_shred = next_slot_coding_shreds[0].clone(); @@ -11256,7 +11256,7 @@ pub mod tests { slot, 10, fec_set_index, - Some(merkle_root), + merkle_root, ); let next_slot_data_shred = next_slot_data_shreds[0].clone(); @@ -11297,7 +11297,7 @@ pub mod tests { parent_slot, 10, next_fec_set_index, - Some(merkle_root), + merkle_root, ); let data_shred = data_shreds[0].clone(); let coding_shred = coding_shreds[0].clone(); @@ -11345,7 +11345,7 @@ pub mod tests { parent_slot, 10, next_fec_set_index, - Some(merkle_root), + merkle_root, ); let data_shred = data_shreds[0].clone(); let coding_shred = coding_shreds[0].clone(); @@ -11389,7 +11389,7 @@ pub mod tests { parent_slot, 10, next_fec_set_index, - Some(merkle_root), + merkle_root, ); let next_data_shred = next_data_shreds[0].clone(); @@ -11436,7 +11436,7 @@ pub mod tests { parent_slot, 10, fec_set_index, - Some(merkle_root), + merkle_root, ); let data_shred = data_shreds[0].clone(); let coding_shred = coding_shreds[0].clone(); @@ -11451,7 +11451,7 @@ pub mod tests { parent_slot, 10, next_fec_set_index, - Some(merkle_root), + merkle_root, ); let next_data_shred = next_data_shreds[0].clone(); @@ -11541,7 +11541,7 @@ pub mod tests { parent_slot, 10, next_fec_set_index, - Some(merkle_root), + merkle_root, ); let data_shred = data_shreds[0].clone(); let coding_shred = coding_shreds[0].clone(); @@ -11576,7 +11576,7 @@ pub mod tests { parent_slot, 10, next_fec_set_index, - Some(merkle_root), + merkle_root, ); let next_data_shred = next_data_shreds[0].clone(); @@ -11670,7 +11670,7 @@ pub mod tests { parent_slot, 10, fec_set_index, - None, + Hash::default(), false, ); let merkle_root = first_data_shreds[0].merkle_root().unwrap(); @@ -11681,7 +11681,7 @@ pub mod tests { parent_slot, 40, fec_set_index, - Some(merkle_root), + merkle_root, false, ); let last_index = last_data_shreds.last().unwrap().index(); @@ -11711,7 +11711,7 @@ pub mod tests { parent_slot, 100, 
fec_set_index, - None, + Hash::default(), false, ); let merkle_root = first_data_shreds[0].merkle_root().unwrap(); @@ -11722,7 +11722,7 @@ pub mod tests { parent_slot, 100, fec_set_index, - Some(merkle_root), + merkle_root, false, ); let last_index = last_data_shreds.last().unwrap().index(); @@ -11743,28 +11743,6 @@ pub mod tests { let results = blockstore.check_last_fec_set(slot).unwrap(); assert_eq!(results.last_fec_set_merkle_root, Some(merkle_root)); assert!(!results.is_retransmitter_signed); - blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap(); - - // Slot is full, but does not contain retransmitter shreds - let fec_set_index = 0; - let (first_data_shreds, _, _) = - setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot( - slot, - parent_slot, - 200, - fec_set_index, - // Do not set merkle root, so shreds are not signed - None, - true, - ); - assert!(first_data_shreds.len() > DATA_SHREDS_PER_FEC_BLOCK); - let block_id = first_data_shreds.last().unwrap().merkle_root().unwrap(); - blockstore - .insert_shreds(first_data_shreds, None, false) - .unwrap(); - let results = blockstore.check_last_fec_set(slot).unwrap(); - assert_eq!(results.last_fec_set_merkle_root, Some(block_id)); - assert!(!results.is_retransmitter_signed); } #[test] diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 3546dda078de93..25de89684720a1 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -1,8 +1,7 @@ use { crate::{ bit_vec::BitVec, - blockstore::MAX_DATA_SHREDS_PER_SLOT, - shred::{self, Shred, ShredType}, + shred::{self, Shred, ShredType, DATA_SHREDS_PER_FEC_BLOCK, MAX_DATA_SHREDS_PER_SLOT}, }, bitflags::bitflags, serde::{Deserialize, Deserializer, Serialize, Serializer}, @@ -22,7 +21,7 @@ bitflags! 
{ // 1) S is a rooted slot itself OR // 2) S's parent is connected AND S is full (S's complete block present) // - // 1) is a straightfoward case, roots are finalized blocks on the main fork + // 1) is a straightforward case, roots are finalized blocks on the main fork // so by definition, they are connected. All roots are connected, but not // all connected slots are (or will become) roots. // @@ -40,7 +39,7 @@ bitflags! { // CONNECTED is explicitly the first bit to ensure backwards compatibility // with the boolean field that ConnectedFlags replaced in SlotMeta. const CONNECTED = 0b0000_0001; - // PARENT_CONNECTED IS INTENTIIONALLY UNUSED FOR NOW + // PARENT_CONNECTED IS INTENTIONALLY UNUSED FOR NOW const PARENT_CONNECTED = 0b1000_0000; } } @@ -342,8 +341,14 @@ mod serde_compat_cast { #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)] pub(crate) struct ErasureConfig { - num_data: usize, - num_coding: usize, + pub(crate) num_data: usize, + pub(crate) num_coding: usize, +} + +impl ErasureConfig { + pub(crate) fn is_fixed(&self) -> bool { + self.num_data == DATA_SHREDS_PER_FEC_BLOCK && self.num_coding == DATA_SHREDS_PER_FEC_BLOCK + } } #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)] @@ -621,7 +626,7 @@ impl SlotMeta { /// Mark the meta's parent as connected. /// If the meta is also full, the meta is now connected as well. Return a - /// boolean indicating whether the meta becamed connected from this call. + /// boolean indicating whether the meta became connected from this call. 
pub fn set_parent_connected(&mut self) -> bool { // Already connected so nothing to do, bail early if self.is_connected() { @@ -1205,7 +1210,7 @@ mod test { bincode::serialize(&with_flags).unwrap() ); - // Dserializing WithBool into WithFlags succeeds + // Deserializing WithBool into WithFlags succeeds assert_eq!( with_flags, bincode::deserialize::(&bincode::serialize(&with_bool).unwrap()).unwrap() diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 5c4af328efd1eb..191e6b3439fb11 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -51,8 +51,8 @@ use { transaction_processing_result::ProcessedTransaction, transaction_processor::ExecutionRecordingConfig, }, + solana_svm_timings::{report_execute_timings, ExecuteTimingType, ExecuteTimings}, solana_svm_transaction::{svm_message::SVMMessage, svm_transaction::SVMTransaction}, - solana_timings::{report_execute_timings, ExecuteTimingType, ExecuteTimings}, solana_transaction::{ sanitized::SanitizedTransaction, versioned::VersionedTransaction, TransactionVerificationMode, @@ -843,7 +843,7 @@ pub struct ProcessOptions { pub allow_dead_slots: bool, pub accounts_db_skip_shrink: bool, pub accounts_db_force_initial_clean: bool, - pub accounts_db_config: Option, + pub accounts_db_config: AccountsDbConfig, pub verify_index: bool, pub runtime_config: RuntimeConfig, /// true if after processing the contents of the blockstore at startup, we should run an accounts hash calc @@ -900,13 +900,11 @@ pub(crate) fn process_blockstore_for_bank_0( exit: Arc, ) -> result::Result>, BlockstoreProcessorError> { // Setup bank for slot 0 - let bank0 = Bank::new_with_paths( + let bank0 = Bank::new_from_genesis( genesis_config, Arc::new(opts.runtime_config.clone()), account_paths, opts.debug_keys.clone(), - None, - false, opts.accounts_db_config.clone(), accounts_update_notifier, None, @@ -1056,7 +1054,7 @@ pub fn process_blockstore_from_root( /// Verify that a segment of 
entries has the correct number of ticks and hashes fn verify_ticks( bank: &Bank, - entries: &[Entry], + mut entries: &[Entry], slot_full: bool, tick_hash_count: &mut u64, ) -> std::result::Result<(), BlockError> { @@ -1086,6 +1084,35 @@ fn verify_ticks( } } + if let Some(first_alpenglow_slot) = bank + .feature_set + .activated_slot(&agave_feature_set::alpenglow::id()) + { + if bank.parent_slot() >= first_alpenglow_slot { + // If both the parent and the bank slot are in an epoch post alpenglow activation, + // no tick verification is needed + return Ok(()); + } + + // If the bank is in the alpenglow epoch, but the parent is from an epoch + // where the feature flag is not active, we must verify ticks that correspond + // to the epoch in which PoH is active. This verification is critical, as otherwise + // a leader could jump the gun and publish a block in the alpenglow epoch without waiting + // the appropriate time as determined by PoH in the prior epoch. + if bank.slot() >= first_alpenglow_slot && next_bank_tick_height == max_bank_tick_height { + if entries.is_empty() { + // This shouldn't happen, but good to double check + error!("Processing empty entries in verify_ticks()"); + return Ok(()); + } + // last entry must be a tick, as verified by the `has_trailing_entry` + // check above. Because in Alpenglow the last tick does not have any + // hashing guarantees, we pass everything but that last tick to the + // entry verification. + entries = &entries[..entries.len() - 1]; + } + } + let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0); if !entries.verify_tick_hash_count(tick_hash_count, hashes_per_tick) { warn!( @@ -1786,6 +1813,7 @@ fn process_next_slots( .unwrap(), *next_slot, ); + set_alpenglow_ticks(&next_bank); trace!( "New bank for slot {}, parent slot is {}", next_slot, @@ -1800,6 +1828,71 @@ Ok(()) } +/// Set alpenglow bank tick height. 
+/// +/// For alpenglow banks this tick height is `max_tick_height` - 1, for a bank on the epoch boundary +/// of feature activation, we need ticks_per_slot for each slot between the parent and epoch boundary +/// and one extra tick for the alpenglow bank +pub fn set_alpenglow_ticks(bank: &Bank) { + let Some(first_alpenglow_slot) = bank + .feature_set + .activated_slot(&agave_feature_set::alpenglow::id()) + else { + return; + }; + + let Some(alpenglow_ticks) = calculate_alpenglow_ticks( + bank.slot(), + first_alpenglow_slot, + bank.parent_slot(), + bank.ticks_per_slot(), + ) else { + return; + }; + + info!( + "Alpenglow: Setting tick height for slot {} to {}", + bank.slot(), + bank.max_tick_height() - alpenglow_ticks + ); + bank.set_tick_height(bank.max_tick_height() - alpenglow_ticks); +} + +/// Calculates how many ticks are needed for a block at `slot` with parent `parent_slot` +/// +/// If both `parent_slot` and `slot` are greater than or equal to `first_alpenglow_slot`, then +/// only 1 tick is needed. This tick has no hashing guarantees, it is simply used as a signal +/// for the end of the block. +/// +/// If both `parent_slot` and `slot` are less than `first_alpenglow_slot`, we need the +/// appropriate amount of PoH ticks, indicated by a None return value. +/// +/// If `parent_slot` is less than `first_alpenglow_slot` and `slot` is greater than or equal +/// to `first_alpenglow_slot` (A block that "straddles" the activation epoch boundary) then: +/// +/// 1. All slots between `parent_slot` and `first_alpenglow_slot` need to have `ticks_per_slot` ticks +/// 2. One extra tick for the actual alpenglow slot +/// 3. 
There are no ticks for any skipped alpenglow slots +fn calculate_alpenglow_ticks( + slot: Slot, + first_alpenglow_slot: Slot, + parent_slot: Slot, + ticks_per_slot: u64, +) -> Option { + // Slots before alpenglow shouldn't have alpenglow ticks + if slot < first_alpenglow_slot { + return None; + } + + let alpenglow_ticks = if parent_slot < first_alpenglow_slot && slot >= first_alpenglow_slot { + (first_alpenglow_slot - parent_slot - 1) * ticks_per_slot + 1 + } else { + 1 + }; + + Some(alpenglow_ticks) +} + /// Starting with the root slot corresponding to `start_slot_meta`, iteratively /// find and process children slots from the blockstore. /// @@ -3426,7 +3519,7 @@ pub mod tests { InstructionError::ProgramFailedToCompile, InstructionError::Immutable, InstructionError::IncorrectAuthority, - InstructionError::BorshIoError("error".to_string()), + InstructionError::BorshIoError, InstructionError::AccountNotRentExempt, InstructionError::InvalidAccountOwner, InstructionError::ArithmeticOverflow, @@ -3472,10 +3565,11 @@ pub mod tests { declare_process_instruction!(MockBuiltinErr, 1, |invoke_context| { let instruction_errors = get_instruction_errors(); - let err = invoke_context + let instruction_context = invoke_context .transaction_context .get_current_instruction_context() - .expect("Failed to get instruction context") + .expect("Failed to get instruction context"); + let err = instruction_context .get_instruction_data() .first() .expect("Failed to get instruction data"); @@ -4753,7 +4847,7 @@ pub mod tests { VoteStateV3::size_of(), &solana_vote_program::id(), ); - let versioned = VoteStateVersions::new_current(vote_state); + let versioned = VoteStateVersions::new_v3(vote_state); VoteStateV3::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap(); ( solana_pubkey::new_rand(), @@ -5352,4 +5446,60 @@ pub mod tests { // Adding another None will noop (even though the block is already full) assert!(check_block_cost_limits(&bank, &tx_costs[0..1]).is_ok()); } + + 
#[test] + fn test_calculate_alpenglow_ticks() { + let first_alpenglow_slot = 10; + let ticks_per_slot = 2; + + // Slots before alpenglow don't have alpenglow ticks + let slot = 9; + let parent_slot = 8; + assert!( + calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot) + .is_none() + ); + + // First alpenglow slot should only have 1 tick + let slot = first_alpenglow_slot; + let parent_slot = first_alpenglow_slot - 1; + assert_eq!( + calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot) + .unwrap(), + 1 + ); + + // First alpenglow slot with skipped non-alpenglow slots + // need to have `ticks_per_slot` ticks per skipped slot and + // then one additional tick for the first alpenglow slot + let slot = first_alpenglow_slot; + let num_skipped_slots = 3; + let parent_slot = first_alpenglow_slot - num_skipped_slots - 1; + assert_eq!( + calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot) + .unwrap(), + num_skipped_slots * ticks_per_slot + 1 + ); + + // Skipped alpenglow slots don't need any additional ticks + let slot = first_alpenglow_slot + 2; + let parent_slot = first_alpenglow_slot; + assert_eq!( + calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot) + .unwrap(), + 1 + ); + + // Skipped alpenglow slots along skipped non-alpenglow slots + // need to have `ticks_per_slot` ticks per skipped non-alpenglow + // slot only and then one additional tick for the alpenglow slot + let slot = first_alpenglow_slot + 2; + let num_skipped_non_alpenglow_slots = 4; + let parent_slot = first_alpenglow_slot - num_skipped_non_alpenglow_slots - 1; + assert_eq!( + calculate_alpenglow_ticks(slot, first_alpenglow_slot, parent_slot, ticks_per_slot) + .unwrap(), + num_skipped_non_alpenglow_slots * ticks_per_slot + 1 + ); + } } diff --git a/ledger/src/leader_schedule_utils.rs b/ledger/src/leader_schedule_utils.rs index 0f9d85c5f0044d..825f8d58911db4 100644 --- 
a/ledger/src/leader_schedule_utils.rs +++ b/ledger/src/leader_schedule_utils.rs @@ -70,6 +70,27 @@ pub fn first_of_consecutive_leader_slots(slot: Slot) -> Slot { (slot / NUM_CONSECUTIVE_LEADER_SLOTS) * NUM_CONSECUTIVE_LEADER_SLOTS } +/// Returns the last slot in the leader window that contains `slot` +#[inline] +pub fn last_of_consecutive_leader_slots(slot: Slot) -> Slot { + first_of_consecutive_leader_slots(slot) + NUM_CONSECUTIVE_LEADER_SLOTS - 1 +} + +/// Returns the index within the leader slot range that contains `slot` +#[inline] +pub fn leader_slot_index(slot: Slot) -> usize { + (slot % NUM_CONSECUTIVE_LEADER_SLOTS) as usize +} + +/// Returns the number of slots left after `slot` in the leader window +/// that contains `slot` +#[inline] +pub fn remaining_slots_in_window(slot: Slot) -> u64 { + NUM_CONSECUTIVE_LEADER_SLOTS + .checked_sub(leader_slot_index(slot) as u64) + .unwrap() +} + #[cfg(test)] mod tests { use { @@ -118,4 +139,38 @@ mod tests { let bank = Bank::new_for_tests(&genesis_config); assert_eq!(slot_leader_at(bank.slot(), &bank).unwrap(), pubkey); } + + #[test] + fn test_leader_span_math() { + // All of the test cases assume a 4 slot leader span and need to be + // adjusted if it changes. 
+ assert_eq!(NUM_CONSECUTIVE_LEADER_SLOTS, 4); + + assert_eq!(first_of_consecutive_leader_slots(0), 0); + assert_eq!(first_of_consecutive_leader_slots(1), 0); + assert_eq!(first_of_consecutive_leader_slots(2), 0); + assert_eq!(first_of_consecutive_leader_slots(3), 0); + assert_eq!(first_of_consecutive_leader_slots(4), 4); + + assert_eq!(last_of_consecutive_leader_slots(0), 3); + assert_eq!(last_of_consecutive_leader_slots(1), 3); + assert_eq!(last_of_consecutive_leader_slots(2), 3); + assert_eq!(last_of_consecutive_leader_slots(3), 3); + assert_eq!(last_of_consecutive_leader_slots(4), 7); + + assert_eq!(leader_slot_index(0), 0); + assert_eq!(leader_slot_index(1), 1); + assert_eq!(leader_slot_index(2), 2); + assert_eq!(leader_slot_index(3), 3); + assert_eq!(leader_slot_index(4), 0); + assert_eq!(leader_slot_index(5), 1); + assert_eq!(leader_slot_index(6), 2); + assert_eq!(leader_slot_index(7), 3); + + assert_eq!(remaining_slots_in_window(0), 4); + assert_eq!(remaining_slots_in_window(1), 3); + assert_eq!(remaining_slots_in_window(2), 2); + assert_eq!(remaining_slots_in_window(3), 1); + assert_eq!(remaining_slots_in_window(4), 4); + } } diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index ea1619dbb8cc3e..55afc6977ab835 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -26,7 +26,7 @@ pub mod leader_schedule_cache; pub mod leader_schedule_utils; pub mod next_slots_iterator; pub mod rooted_slot_iterator; -pub mod shred; +conditional_mod::conditional_vis_mod!(shred, feature="agave-unstable-api", pub,pub(crate)); mod shredder; pub mod sigverify_shreds; pub mod slot_stats; diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 9440cc58a20dd0..4468aa172387f2 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -1,3 +1,4 @@ +#![cfg_attr(not(feature = "agave-unstable-api"), allow(dead_code))] //! The `shred` module defines data structures and methods to pull MTU sized data frames from the //! network. There are two types of shreds: data and coding. 
Data shreds contain entry information //! while coding shreds provide redundancy to protect against dropped network packets (erasures). @@ -49,8 +50,6 @@ //! So, given a) - c), we must restrict data shred's payload length such that the entire coding //! payload can fit into one coding shred / packet. -#[cfg(test)] -pub(crate) use self::shred_code::MAX_CODE_SHREDS_PER_SLOT; pub(crate) use self::{ merkle_tree::{PROOF_ENTRIES_FOR_32_32_BATCH, SIZE_OF_MERKLE_ROOT}, payload::serde_bytes_payload, @@ -65,7 +64,7 @@ pub use { }; use { self::{shred_code::ShredCode, traits::Shred as _}, - crate::blockstore::{self, MAX_DATA_SHREDS_PER_SLOT}, + crate::blockstore::{self}, assert_matches::debug_assert_matches, bitflags::bitflags, num_enum::{IntoPrimitive, TryFromPrimitive}, @@ -85,11 +84,10 @@ use { use {solana_keypair::Keypair, solana_perf::packet::Packet, solana_signer::Signer}; mod common; -mod legacy; pub(crate) mod merkle; mod merkle_tree; mod payload; -pub mod shred_code; +mod shred_code; mod shred_data; mod stats; mod traits; @@ -120,6 +118,12 @@ pub const DATA_SHREDS_PER_FEC_BLOCK: usize = 32; pub const CODING_SHREDS_PER_FEC_BLOCK: usize = 32; pub const SHREDS_PER_FEC_BLOCK: usize = DATA_SHREDS_PER_FEC_BLOCK + CODING_SHREDS_PER_FEC_BLOCK; +/// An upper bound on maximum number of data shreds we can handle in a slot +/// 32K shreds would allow ~320K peak TPS +/// (32K shreds per slot * 4 TX per shred * 2.5 slots per sec) +pub const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768; +pub const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT; + // Statically compute the typical data batch size assuming: // 1. 32:32 erasure coding batch // 2. Merkles are chained @@ -135,10 +139,6 @@ pub const fn get_data_shred_bytes_per_batch_typical() -> u64 { (DATA_SHREDS_PER_FEC_BLOCK * capacity) as u64 } -// For legacy tests and benchmarks. 
-const_assert_eq!(LEGACY_SHRED_DATA_CAPACITY, 1051); -pub const LEGACY_SHRED_DATA_CAPACITY: usize = legacy::ShredData::CAPACITY; - // LAST_SHRED_IN_SLOT also implies DATA_COMPLETE_SHRED. // So it cannot be LAST_SHRED_IN_SLOT if not also DATA_COMPLETE_SHRED. bitflags! { @@ -164,13 +164,15 @@ impl ShredFlags { #[derive(Debug, Error)] pub enum Error { #[error(transparent)] - BincodeError(#[from] bincode::Error), + Bincode(#[from] bincode::Error), #[error(transparent)] - ErasureError(#[from] reed_solomon_erasure::Error), + Erasure(#[from] reed_solomon_erasure::Error), #[error("Invalid data size: {size}, payload: {payload}")] InvalidDataSize { size: u16, payload: usize }, #[error("Invalid deshred set")] InvalidDeshredSet, + #[error("Invalid erasure config")] + InvalidErasureConfig, #[error("Invalid erasure shard index: {0:?}")] InvalidErasureShardIndex(/*headers:*/ Box), #[error("Invalid merkle proof")] @@ -202,7 +204,7 @@ pub enum Error { #[error("Invalid packet size, could not get the shred")] InvalidPacketSize, #[error(transparent)] - IoError(#[from] std::io::Error), + Io(#[from] std::io::Error), #[error("Unknown proof size")] UnknownProofSize, } @@ -221,8 +223,6 @@ pub enum ShredType { #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] #[serde(into = "u8", try_from = "u8")] enum ShredVariant { - LegacyCode, // 0b0101_1010 - LegacyData, // 0b1010_0101 // proof_size is the number of Merkle proof entries, and is encoded in the // lowest 4 bits of the binary representation. The first 4 bits identify // the shred variant: @@ -277,21 +277,6 @@ pub enum Shred { ShredData(ShredData), } -#[derive(Debug, PartialEq, Eq)] -pub(crate) enum SignedData<'a> { - Chunk(&'a [u8]), // Chunk of payload past signature. 
- MerkleRoot(Hash), -} - -impl AsRef<[u8]> for SignedData<'_> { - fn as_ref(&self) -> &[u8] { - match self { - Self::Chunk(chunk) => chunk, - Self::MerkleRoot(root) => root.as_ref(), - } - } -} - /// Tuple which uniquely identifies a shred should it exists. #[derive(Clone, Copy, Eq, Debug, Hash, PartialEq)] pub struct ShredId(Slot, /*shred index:*/ u32, ShredType); @@ -398,7 +383,7 @@ impl Shred { dispatch!(fn common_header(&self) -> &ShredCommonHeader); #[cfg(any(test, feature = "dev-context-only-utils"))] dispatch!(fn set_signature(&mut self, signature: Signature)); - dispatch!(fn signed_data(&self) -> Result); + dispatch!(fn signed_data(&self) -> Result); dispatch!(pub fn chained_merkle_root(&self) -> Result); dispatch!(pub(crate) fn retransmitter_signature(&self) -> Result); @@ -416,43 +401,12 @@ impl Shred { packet.meta_mut().size = size; } - // TODO: Should this sanitize output? - pub fn new_from_data( - slot: Slot, - index: u32, - parent_offset: u16, - data: &[u8], - flags: ShredFlags, - reference_tick: u8, - version: u16, - fec_set_index: u32, - ) -> Self { - Self::from(ShredData::new_from_data( - slot, - index, - parent_offset, - data, - flags, - reference_tick, - version, - fec_set_index, - )) - } - pub fn new_from_serialized_shred(shred: T) -> Result where T: AsRef<[u8]> + Into, Payload: From, { Ok(match layout::get_shred_variant(shred.as_ref())? { - ShredVariant::LegacyCode => { - let shred = legacy::ShredCode::from_payload(shred)?; - Self::from(ShredCode::from(shred)) - } - ShredVariant::LegacyData => { - let shred = legacy::ShredData::from_payload(shred)?; - Self::from(ShredData::from(shred)) - } ShredVariant::MerkleCode { .. 
} => { let shred = merkle::ShredCode::from_payload(shred)?; Self::from(ShredCode::from(shred)) @@ -616,8 +570,6 @@ impl Shred { match self { Self::ShredCode(ShredCode::Merkle(shred)) => shred.retransmitter_signature_offset(), Self::ShredData(ShredData::Merkle(shred)) => shred.retransmitter_signature_offset(), - Self::ShredCode(ShredCode::Legacy(_)) => Err(Error::InvalidShredVariant), - Self::ShredData(ShredData::Legacy(_)) => Err(Error::InvalidShredVariant), } } } @@ -648,9 +600,7 @@ impl TryFrom for merkle::Shred { fn try_from(shred: Shred) -> Result { match shred { - Shred::ShredCode(ShredCode::Legacy(_)) => Err(Error::InvalidShredVariant), Shred::ShredCode(ShredCode::Merkle(shred)) => Ok(Self::ShredCode(shred)), - Shred::ShredData(ShredData::Legacy(_)) => Err(Error::InvalidShredVariant), Shred::ShredData(ShredData::Merkle(shred)) => Ok(Self::ShredData(shred)), } } @@ -660,8 +610,6 @@ impl From for ShredType { #[inline] fn from(shred_variant: ShredVariant) -> Self { match shred_variant { - ShredVariant::LegacyCode => ShredType::Code, - ShredVariant::LegacyData => ShredType::Data, ShredVariant::MerkleCode { .. } => ShredType::Code, ShredVariant::MerkleData { .. 
} => ShredType::Data, } @@ -672,8 +620,6 @@ impl From for u8 { #[inline] fn from(shred_variant: ShredVariant) -> u8 { match shred_variant { - ShredVariant::LegacyCode => u8::from(ShredType::Code), - ShredVariant::LegacyData => u8::from(ShredType::Data), ShredVariant::MerkleCode { proof_size, chained: false, @@ -722,10 +668,9 @@ impl TryFrom for ShredVariant { type Error = Error; #[inline] fn try_from(shred_variant: u8) -> Result { - if shred_variant == u8::from(ShredType::Code) { - Ok(ShredVariant::LegacyCode) - } else if shred_variant == u8::from(ShredType::Data) { - Ok(ShredVariant::LegacyData) + if shred_variant == u8::from(ShredType::Code) || shred_variant == u8::from(ShredType::Data) + { + Err(Error::InvalidShredVariant) } else { let proof_size = shred_variant & 0x0F; match shred_variant & 0xF0 { @@ -797,6 +742,7 @@ pub fn should_discard_shred<'a, P>( root: Slot, max_slot: Slot, shred_version: u16, + enforce_fixed_fec_set: impl Fn(Slot) -> bool, stats: &mut ShredFetchStats, ) -> bool where @@ -843,9 +789,14 @@ where stats.index_bad_deserialize += 1; return true; }; + let Some(fec_set_index) = layout::get_fec_set_index(shred) else { + stats.fec_set_index_bad_deserialize += 1; + return true; + }; + match ShredType::from(shred_variant) { ShredType::Code => { - if index >= shred_code::MAX_CODE_SHREDS_PER_SLOT as u32 { + if index >= MAX_CODE_SHREDS_PER_SLOT as u32 { stats.index_out_of_bounds += 1; return true; } @@ -853,6 +804,18 @@ where stats.slot_out_of_range += 1; return true; } + + let Ok(erasure_config) = layout::get_erasure_config(shred) else { + stats.erasure_config_bad_deserialize += 1; + return true; + }; + + if !erasure_config.is_fixed() { + stats.misaligned_erasure_config += 1; + if enforce_fixed_fec_set(slot) { + return true; + } + } } ShredType::Data => { if index >= MAX_DATA_SHREDS_PER_SLOT as u32 { @@ -871,12 +834,31 @@ where stats.slot_out_of_range += 1; return true; } + + let Ok(shred_flags) = layout::get_flags(shred) else { + 
stats.shred_flags_bad_deserialize += 1; + return true; + }; + if shred_flags.contains(ShredFlags::LAST_SHRED_IN_SLOT) + && !check_last_data_shred_index(index) + { + stats.misaligned_last_data_index += 1; + if enforce_fixed_fec_set(slot) { + return true; + } + } + } + } + + if !check_fixed_fec_set(index, fec_set_index) { + stats.misaligned_fec_set += 1; + if enforce_fixed_fec_set(slot) { + return true; } } + match shred_variant { - ShredVariant::LegacyCode - | ShredVariant::LegacyData - | ShredVariant::MerkleCode { chained: false, .. } + ShredVariant::MerkleCode { chained: false, .. } | ShredVariant::MerkleData { chained: false, .. } => { return true; } @@ -892,11 +874,67 @@ where false } +/// Returns true if `index` and `fec_set_index` are valid under the assumption that +/// all erasure sets contain exactly `DATA_SHREDS_PER_FEC_BLOCK` data and coding shreds: +/// - `index` is between `fec_set_index` and `fec_set_index + DATA_SHREDS_PER_FEC_BLOCK` +/// - `fec_set_index` is a multiple of `DATA_SHREDS_PER_FEC_BLOCK` +fn check_fixed_fec_set(index: u32, fec_set_index: u32) -> bool { + index >= fec_set_index + && index < fec_set_index + DATA_SHREDS_PER_FEC_BLOCK as u32 + && fec_set_index % DATA_SHREDS_PER_FEC_BLOCK as u32 == 0 +} + +/// Returns true if `index` of the last data shred is valid under the assumption that +/// all erasure sets contain exactly `DATA_SHREDS_PER_FEC_BLOCK` data and coding shreds: +/// - `index + 1` must be a multiple of `DATA_SHREDS_PER_FEC_BLOCK` +/// +/// Note: this check is critical to verify that the last fec set is sufficiently sized. 
+/// This currently is checked post insert in `Blockstore::check_last_fec_set`, but in the +/// future it can be solely checked during ingest +fn check_last_data_shred_index(index: u32) -> bool { + (index + 1) % (DATA_SHREDS_PER_FEC_BLOCK as u32) == 0 +} + pub fn max_ticks_per_n_shreds(num_shreds: u64, shred_data_size: Option) -> u64 { let ticks = create_ticks(1, 0, Hash::default()); max_entries_per_n_shred(&ticks[0], num_shreds, shred_data_size) } +// This is used in the integration tests for shredding. +#[cfg(feature = "dev-context-only-utils")] +pub fn max_entries_per_n_shred_last_or_not( + entry: &Entry, + num_shreds: u64, + is_last_in_slot: bool, +) -> u64 { + // Default 32:32 erasure batches yields 64 shreds; log2(64) = 6. + let merkle_variant_unsigned = Some(( + /*proof_size:*/ 6, /*chained:*/ true, /*resigned:*/ false, + )); + let merkle_variant_signed = Some(( + /*proof_size:*/ 6, /*chained:*/ true, /*resigned:*/ true, + )); + + let vec_size = bincode::serialized_size(&vec![entry]).unwrap(); + let entry_size = bincode::serialized_size(entry).unwrap(); + let count_size = vec_size - entry_size; + + if !is_last_in_slot { + // all shreds are unsigned + let shred_data_size = ShredData::capacity(merkle_variant_unsigned).unwrap() as u64; + (shred_data_size * num_shreds - count_size) / entry_size + } else { + // last FEC SET is signed, all others are unsigned + let shred_data_size_unsigned = ShredData::capacity(merkle_variant_unsigned).unwrap() as u64; + let shred_data_size_signed = ShredData::capacity(merkle_variant_signed).unwrap() as u64; + let shreds_per_fec_block = SHREDS_PER_FEC_BLOCK as u64; + (shred_data_size_unsigned * (num_shreds - shreds_per_fec_block) + + shred_data_size_signed * shreds_per_fec_block + - count_size) + / entry_size + } +} + pub fn max_entries_per_n_shred( entry: &Entry, num_shreds: u64, @@ -955,48 +993,48 @@ mod tests { rand_chacha::{rand_core::SeedableRng, ChaChaRng}, rayon::ThreadPoolBuilder, solana_keypair::keypair_from_seed, - 
solana_signer::Signer, std::io::{Cursor, Seek, SeekFrom, Write}, - test_case::{test_case, test_matrix}, + test_case::test_case, }; const SIZE_OF_SHRED_INDEX: usize = 4; const SIZE_OF_SHRED_SLOT: usize = 8; const SIZE_OF_SHRED_VARIANT: usize = 1; + const SIZE_OF_VERSION: usize = 2; + const SIZE_OF_FEC_SET_INDEX: usize = 4; + const OFFSET_OF_SHRED_VARIANT: usize = SIZE_OF_SIGNATURE; const OFFSET_OF_SHRED_SLOT: usize = SIZE_OF_SIGNATURE + SIZE_OF_SHRED_VARIANT; const OFFSET_OF_SHRED_INDEX: usize = OFFSET_OF_SHRED_SLOT + SIZE_OF_SHRED_SLOT; - const OFFSET_OF_SHRED_VARIANT: usize = SIZE_OF_SIGNATURE; - - fn bs58_decode>(data: T) -> Vec { - bs58::decode(data).into_vec().unwrap() - } + const OFFSET_OF_FEC_SET_INDEX: usize = + OFFSET_OF_SHRED_INDEX + SIZE_OF_SHRED_INDEX + SIZE_OF_VERSION; + const OFFSET_OF_NUM_DATA: usize = OFFSET_OF_FEC_SET_INDEX + SIZE_OF_FEC_SET_INDEX; pub(super) fn make_merkle_shreds_for_tests( rng: &mut R, slot: Slot, data_size: usize, - chained: bool, is_last_in_slot: bool, ) -> Result, Error> { let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); - let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen())); + let chained_merkle_root = Hash::new_from_array(rng.gen()); let parent_offset = rng.gen_range(1..=u16::try_from(slot).unwrap_or(u16::MAX)); let parent_slot = slot.checked_sub(u64::from(parent_offset)).unwrap(); let mut data = vec![0u8; data_size]; + let fec_set_index = rng.gen_range(0..21) * DATA_SHREDS_PER_FEC_BLOCK as u32; rng.fill(&mut data[..]); merkle::make_shreds_from_data( &thread_pool, &Keypair::new(), - chained_merkle_root, + Some(chained_merkle_root), &data[..], slot, parent_slot, rng.gen(), // shred_version rng.gen_range(1..64), // reference_tick is_last_in_slot, - rng.gen_range(0..671), // next_shred_index - rng.gen_range(0..781), // next_code_index + fec_set_index, // next_shred_index + fec_set_index, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ) @@ 
-1006,7 +1044,11 @@ mod tests { fn test_shred_constants() { let common_header = ShredCommonHeader { signature: Signature::default(), - shred_variant: ShredVariant::LegacyCode, + shred_variant: ShredVariant::MerkleCode { + proof_size: 0, + chained: true, + resigned: false, + }, slot: Slot::MAX, index: u32::MAX, version: u16::MAX, @@ -1099,17 +1141,13 @@ mod tests { #[test] fn test_invalid_parent_offset() { - let shred = Shred::new_from_data(10, 0, 1000, &[1, 2, 3], ShredFlags::empty(), 0, 1, 0); + let keypair = Keypair::new(); + let shred = Shredder::single_shred_for_tests(10, &keypair); + assert_matches!(shred.parent(), Ok(9)); let mut packet = Packet::default(); shred.copy_to_packet(&mut packet); + wire::corrupt_and_set_parent_offset(packet.buffer_mut(), 1000); let shred_res = Shred::new_from_serialized_shred(packet.data(..).unwrap().to_vec()); - assert_matches!( - shred.parent(), - Err(Error::InvalidParentOffset { - slot: 10, - parent_offset: 1000 - }) - ); assert_matches!( shred_res, Err(Error::InvalidParentOffset { @@ -1129,7 +1167,6 @@ mod tests { &mut rng, slot, 1200 * 5, // data_size - true, // chained is_last_in_slot, ) .unwrap(); @@ -1155,6 +1192,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); } @@ -1167,6 +1205,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.index_overrun, 1); @@ -1177,6 +1216,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.index_overrun, 2); @@ -1187,6 +1227,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.index_overrun, 3); @@ -1197,6 +1238,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.index_overrun, 4); @@ -1207,6 +1249,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.index_overrun, 5); @@ -1218,6 +1261,7 @@ mod tests { root, max_slot, shred_version.wrapping_add(1), + |_| true, &mut stats )); 
assert_eq!(stats.shred_version_mismatch, 1); @@ -1229,6 +1273,7 @@ mod tests { parent_slot + 1, // root max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.slot_out_of_range, 1); @@ -1250,6 +1295,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.slot_out_of_range, 1); @@ -1271,6 +1317,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.bad_parent_offset, 1); @@ -1291,6 +1338,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.index_out_of_bounds, 1); @@ -1307,6 +1355,7 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); } @@ -1317,6 +1366,7 @@ mod tests { root, max_slot, shred_version.wrapping_add(1), + |_| true, &mut stats )); assert_eq!(stats.shred_version_mismatch, 1); @@ -1328,6 +1378,7 @@ mod tests { slot, // root max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.slot_out_of_range, 1); @@ -1348,12 +1399,173 @@ mod tests { root, max_slot, shred_version, + |_| true, &mut stats )); assert_eq!(stats.index_out_of_bounds, 1); } } + #[test_case(true; "enforce_fixed_fec_set")] + #[test_case(false ; "do_not_enforce_fixed_fec_set")] + fn test_should_discard_shred_fec_set_checks(enforce_fixed_fec_set: bool) { + solana_logger::setup(); + let mut rng = rand::thread_rng(); + let slot = 18_291; + let shreds = make_merkle_shreds_for_tests( + &mut rng, + slot, + 1200 * 5, // data_size + false, // is_last_in_slot + ) + .unwrap(); + let shreds: Vec<_> = shreds.into_iter().map(Shred::from).collect(); + assert_eq!(shreds.iter().map(Shred::fec_set_index).dedup().count(), 1); + + assert_matches!(shreds[0].shred_type(), ShredType::Data); + let parent_slot = shreds[0].parent().unwrap(); + let shred_version = shreds[0].common_header().version; + let root = rng.gen_range(0..parent_slot); + let max_slot = slot + rng.gen_range(1..65536); + + // fec_set_index not multiple of 32 + { + let mut packet = 
Packet::default(); + shreds[0].copy_to_packet(&mut packet); + + let bad_fec_set_index = 5u32; + { + let mut cursor = Cursor::new(packet.buffer_mut()); + cursor + .seek(SeekFrom::Start(OFFSET_OF_FEC_SET_INDEX as u64)) + .unwrap(); + cursor.write_all(&bad_fec_set_index.to_le_bytes()).unwrap(); + } + + let mut stats = ShredFetchStats::default(); + let should_discard = should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| enforce_fixed_fec_set, + &mut stats, + ); + assert_eq!(should_discard, enforce_fixed_fec_set); + assert_eq!(stats.misaligned_fec_set, 1); + } + + // index not in range [fec_set_index, fec_set_index + 32) + { + let mut packet = Packet::default(); + shreds[0].copy_to_packet(&mut packet); + + let fec_set_index = 64u32; // Multiple of 32 + let bad_index = 100u32; // Outside [64, 96) + { + let mut cursor = Cursor::new(packet.buffer_mut()); + cursor + .seek(SeekFrom::Start(OFFSET_OF_SHRED_INDEX as u64)) + .unwrap(); + cursor.write_all(&bad_index.to_le_bytes()).unwrap(); + cursor + .seek(SeekFrom::Start(OFFSET_OF_FEC_SET_INDEX as u64)) + .unwrap(); + cursor.write_all(&fec_set_index.to_le_bytes()).unwrap(); + } + + let mut stats = ShredFetchStats::default(); + let should_discard = should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| enforce_fixed_fec_set, + &mut stats, + ); + assert_eq!(should_discard, enforce_fixed_fec_set); + assert_eq!(stats.misaligned_fec_set, 1); + } + + // bad erasure config 16:32 + { + let code_shred = shreds + .iter() + .find(|s| s.shred_type() == ShredType::Code) + .unwrap(); + let mut packet = Packet::default(); + code_shred.copy_to_packet(&mut packet); + + let bad_num_data = 16u16; + { + let mut cursor = Cursor::new(packet.buffer_mut()); + cursor + .seek(SeekFrom::Start(OFFSET_OF_NUM_DATA as u64)) + .unwrap(); + cursor.write_all(&bad_num_data.to_le_bytes()).unwrap(); + } + + let mut stats = ShredFetchStats::default(); + let should_discard = should_discard_shred( + &packet, + root, + 
max_slot, + shred_version, + |_| enforce_fixed_fec_set, + &mut stats, + ); + assert_eq!(should_discard, enforce_fixed_fec_set); + assert_eq!(stats.misaligned_erasure_config, 1); + } + + // data shred with LAST_SHRED_IN_SLOT flag on shred 30 + let shreds = make_merkle_shreds_for_tests( + &mut rng, + slot, + 1200 * 5, // data_size + true, // is_last_in_slot + ) + .unwrap(); + let shreds: Vec<_> = shreds.into_iter().map(Shred::from).collect(); + let parent_slot = shreds[0].parent().unwrap(); + let shred_version = shreds[0].common_header().version; + let root = rng.gen_range(0..parent_slot); + let data_shreds: Vec<_> = shreds + .iter() + .filter(|s| s.shred_type() == ShredType::Data) + .collect(); + let last_data_shred = data_shreds.last().unwrap(); + assert!(last_data_shred.last_in_slot()); + let mut packet = Packet::default(); + last_data_shred.copy_to_packet(&mut packet); + + let bad_last_index = 30u32; + let fec_set_index = 0u32; + { + let mut cursor = Cursor::new(packet.buffer_mut()); + cursor + .seek(SeekFrom::Start(OFFSET_OF_SHRED_INDEX as u64)) + .unwrap(); + cursor.write_all(&bad_last_index.to_le_bytes()).unwrap(); + cursor + .seek(SeekFrom::Start(OFFSET_OF_FEC_SET_INDEX as u64)) + .unwrap(); + cursor.write_all(&fec_set_index.to_le_bytes()).unwrap(); + } + + let mut stats = ShredFetchStats::default(); + let should_discard = should_discard_shred( + &packet, + root, + max_slot, + shred_version, + |_| enforce_fixed_fec_set, + &mut stats, + ); + assert_eq!(should_discard, enforce_fixed_fec_set); + assert_eq!(stats.misaligned_last_data_index, 1); + } + // Asserts that ShredType is backward compatible with u8. #[test] fn test_shred_type_compat() { @@ -1392,44 +1604,21 @@ mod tests { assert_matches!(ShredVariant::try_from(0b1010_0000), Err(_)); assert_matches!(bincode::deserialize::(&[0b0101_0000]), Err(_)); assert_matches!(bincode::deserialize::(&[0b1010_0000]), Err(_)); - // Legacy coding shred. 
- assert_eq!(u8::from(ShredVariant::LegacyCode), 0b0101_1010); - assert_eq!(ShredType::from(ShredVariant::LegacyCode), ShredType::Code); - assert_matches!( - ShredVariant::try_from(0b0101_1010), - Ok(ShredVariant::LegacyCode) - ); - let buf = bincode::serialize(&ShredVariant::LegacyCode).unwrap(); - assert_eq!(buf, vec![0b0101_1010]); - assert_matches!( - bincode::deserialize::(&[0b0101_1010]), - Ok(ShredVariant::LegacyCode) - ); - // Legacy data shred. - assert_eq!(u8::from(ShredVariant::LegacyData), 0b1010_0101); - assert_eq!(ShredType::from(ShredVariant::LegacyData), ShredType::Data); - assert_matches!( - ShredVariant::try_from(0b1010_0101), - Ok(ShredVariant::LegacyData) - ); - let buf = bincode::serialize(&ShredVariant::LegacyData).unwrap(); - assert_eq!(buf, vec![0b1010_0101]); - assert_matches!( - bincode::deserialize::(&[0b1010_0101]), - Ok(ShredVariant::LegacyData) - ); + assert_matches!(ShredVariant::try_from(0b0101_1010), Err(_)); + assert_matches!(bincode::deserialize::(&[0b0101_1010]), Err(_)); + assert_matches!(ShredVariant::try_from(0b1010_0101), Err(_)); + assert_matches!(bincode::deserialize::(&[0b1010_0101]), Err(_)); } - #[test_case(false, false, 0b0100_0000)] - #[test_case(true, false, 0b0110_0000)] - #[test_case(true, true, 0b0111_0000)] - fn test_shred_variant_compat_merkle_code(chained: bool, resigned: bool, byte: u8) { + #[test_case(false, 0b0110_0000)] + #[test_case(true, 0b0111_0000)] + fn test_shred_variant_compat_merkle_code(resigned: bool, byte: u8) { for proof_size in 0..=15u8 { let byte = byte | proof_size; assert_eq!( u8::from(ShredVariant::MerkleCode { proof_size, - chained, + chained: true, resigned, }), byte @@ -1437,7 +1626,7 @@ mod tests { assert_eq!( ShredType::from(ShredVariant::MerkleCode { proof_size, - chained, + chained: true, resigned, }), ShredType::Code @@ -1446,13 +1635,13 @@ mod tests { ShredVariant::try_from(byte).unwrap(), ShredVariant::MerkleCode { proof_size, - chained, + chained: true, resigned, }, ); let buf = 
bincode::serialize(&ShredVariant::MerkleCode { proof_size, - chained, + chained: true, resigned, }) .unwrap(); @@ -1461,23 +1650,22 @@ mod tests { bincode::deserialize::(&[byte]).unwrap(), ShredVariant::MerkleCode { proof_size, - chained, + chained: true, resigned, } ); } } - #[test_case(false, false, 0b1000_0000)] - #[test_case(true, false, 0b1001_0000)] - #[test_case(true, true, 0b1011_0000)] - fn test_shred_variant_compat_merkle_data(chained: bool, resigned: bool, byte: u8) { + #[test_case(false, 0b1001_0000)] + #[test_case(true, 0b1011_0000)] + fn test_shred_variant_compat_merkle_data(resigned: bool, byte: u8) { for proof_size in 0..=15u8 { let byte = byte | proof_size; assert_eq!( u8::from(ShredVariant::MerkleData { proof_size, - chained, + chained: true, resigned, }), byte @@ -1485,7 +1673,7 @@ mod tests { assert_eq!( ShredType::from(ShredVariant::MerkleData { proof_size, - chained, + chained: true, resigned, }), ShredType::Data @@ -1494,13 +1682,13 @@ mod tests { ShredVariant::try_from(byte).unwrap(), ShredVariant::MerkleData { proof_size, - chained, + chained: true, resigned } ); let buf = bincode::serialize(&ShredVariant::MerkleData { proof_size, - chained, + chained: true, resigned, }) .unwrap(); @@ -1509,7 +1697,7 @@ mod tests { bincode::deserialize::(&[byte]).unwrap(), ShredVariant::MerkleData { proof_size, - chained, + chained: true, resigned } ); @@ -1572,79 +1760,120 @@ mod tests { #[test] fn test_serde_compat_shred_data() { - const SEED: &str = "6qG9NGWEtoTugS4Zgs46u8zTccEJuRHtrNMiUayLHCxt"; - const PAYLOAD: &str = "hNX8YgJCQwSFGJkZ6qZLiepwPjpctC9UCsMD1SNNQurBXv\ - rm7KKfLmPRMM9CpWHt6MsJuEWpDXLGwH9qdziJzGKhBMfYH63avcchjdaUiMqzVip7cUD\ - kqZ9zZJMrHCCUDnxxKMupsJWKroUSjKeo7hrug2KfHah85VckXpRna4R9QpH7tf2WVBTD\ - M4m3EerctsEQs8eZaTRxzTVkhtJYdNf74KZbH58dc3Yn2qUxF1mexWoPS6L5oZBatx"; + // bytes of a serialized merkle data shred + const PAYLOAD: &str = "aX2ovF3sZRfd6HyqMow9kkrtL3MyJd52m7gvuSjcvA4qayXZ\ + 
cVPhjURcs4JX86YQM8wVrKXqdneqdEUJwBWhFrxSkegDSov6NQoK89SzZi9auEXHHr35dmN\ + 4zQbxuNdPjKM2K7b7WKRWaHyoMKQfG9jDbJGcWqwVkAxBmUXZQKryHvAqyNdBuRTdWrMtPK\ + DiJWhqVWTmokpyGNceL7mqVr3VrLby6dEuiEUCBHCkhbsXBjfpFZk4yRoSKosb7BViTWWdt\ + pWd7NrbDSiE97sBppEU1nWTPaVQh3bu91x8dEoYk696k532MxnhRLcKeL4XzG6P2HzypAck\ + JdXiRJDn5E3woA8aiPojqdN9ScthJ8yXq1h4HhvzTRWkRxRBpJL8HEYPBcshwuMLDZ9iBsW\ + SFZLmj5v1xH3kDnMuNYJg6Dau6PKHnZyD15tTyFtFtMaXaBc35RqYhsM7s8JuQ9tJ1UfFwd\ + khHa1wdrmTWGcvq9DDmALuTtejH1ccoW43GiYSs1TmByJWjRtupvLzMRifZZ7meaGbUBgHU\ + kA6t1VN3akoZ9BhdX561KpFGABxTU4NxyFqztEy1EB5EJYtTHwtbJQb1NmNMwKFkazXkn1o\ + uKK6drH5y19roH3mMo2JykapbvzYPDBSXUwKQWe1RqSvogapwPxm1EzSRDeXNDP6EYUJJjj\ + TAnckNatpT5UZDz4EhpaSbUzd9b5ztqsdPp9HxeBTm412GopAXKN5iSXSPS2WvrEdnANFD7\ + tRV3a6PM2SfwpF6eFM5J7xXGJSoPm5TWJSPBMbxttxVFUETSRrBubEsd24aymYZZePJtHr7\ + Q8S1deygcyXH5WhhYAmR23hNPv3nUUHe8iwJfaFg73Ncjr8fQBVjwePEy9JKT5jNG5sm87q\ + e2RrHEWEwkNKnNgUknoVMbL7y3wmGFpP8VoKTgP51EjMDz7JTxnVsZeRsSp29STteGKbq4i\ + wiC5EmMS5K86CAJ86FYt1kXXHJBSw4D79wAMgxRDDycp5PgdowdLxAbwySgpmwdfnxnSD4h\ + Y8mo4jLGWokP1mGdgjnPmtMbzndiQCLPjpUcbZoVc6SQrTDCufupkJhy1ewo64yA1db6T2T\ + ASTWSHJkjzaWt7QtFfnBo8WoXQrNKw5pyKAQsmP7n6r1SVD7tASfcZAjfaFHxkVvMpKwTQF\ + dy9WHxREeCPK3yeN7ACT75RgRuRT1shC1PRCuAu4EFGnBmr3nWuDrYNCG5WrWuW6RRoMyB3\ + YaXqjYMXRUVuwb5h2PBP9euBb96Ntung8ihWXa2mbKMYMtmaoYCDhYYrFYszYfdgQH68JYz\ + AXZvjFH1SxCETfiXAWGD1aYDa33rXZLcLVx637igoydr77qmzo5YozRQnuXUiJ19PScLWic\ + 8jWeVmQ6Mm7BLoGhVPyYbJBeyX5HRwh8CNeLK2ekmhFz9MypB1rM2PXUfcnr2MXS9WRK8bh\ + sy47awNdApPdN3RxmuyPLnvmN6FsG5fUNqF8rsz9KUiJh9C4ziYf6NSZvVG2c1KFsQRyFrS\ + BzyjqqxBrH1xereV9YNr1gNamFjhZTncpGcPQf9oAoA4LQeSAZXR1dMtfktCs1fFWVbA67F\ + dQ1GrpZVGTsZCbuw7Tspns8WoL158AdS7"; + let mut rng = { - let seed = <[u8; 32]>::try_from(bs58_decode(SEED)).unwrap(); + let seed = [1u8; 32]; ChaChaRng::from_seed(seed) }; - let mut data = [0u8; legacy::ShredData::CAPACITY]; - rng.fill(&mut data[..]); - let mut seed = [0u8; Keypair::SECRET_KEY_LENGTH]; rng.fill(&mut seed[..]); + let mut data = 
[0u8; 4096]; + rng.fill(&mut data[..]); let keypair = keypair_from_seed(&seed).unwrap(); - let mut shred = Shred::new_from_data( - 141939602, // slot - 28685, // index - 36390, // parent_offset - &data, // data - ShredFlags::LAST_SHRED_IN_SLOT, - 37, // reference_tick - 45189, // version - 28657, // fec_set_index - ); + let slot = 142076266; + let shredder = Shredder::new(slot, slot.saturating_sub(1), 0, 42).unwrap(); + let reed_solomon_cache = ReedSolomonCache::default(); + let mut shred = shredder + .make_shreds_from_data_slice( + &keypair, + &data, + false, + Hash::default(), + 64, + 64, + &reed_solomon_cache, + &mut ProcessShredsStats::default(), + ) + .unwrap() + .next() + .unwrap(); shred.sign(&keypair); assert!(shred.verify(&keypair.pubkey())); assert_matches!(shred.sanitize(), Ok(())); - let mut payload = bs58_decode(PAYLOAD); - payload.extend({ - let skip = payload.len() - SIZE_OF_DATA_SHRED_HEADERS; - data.iter().skip(skip).copied() - }); + let payload = bs58::decode(PAYLOAD).into_vec().unwrap(); let mut packet = Packet::default(); packet.buffer_mut()[..payload.len()].copy_from_slice(&payload); packet.meta_mut().size = payload.len(); assert_eq!(shred.bytes_to_store(), payload); - assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap()); + assert_eq!( + shred, + Shred::new_from_serialized_shred(payload.to_vec()).unwrap() + ); verify_shred_layout(&shred, &packet); } #[test] fn test_serde_compat_shred_data_empty() { - const SEED: &str = "E3M5hm8yAEB7iPhQxFypAkLqxNeZCTuGBDMa8Jdrghoo"; - const PAYLOAD: &str = "nRNFVBEsV9FEM5KfmsCXJsgELRSkCV55drTavdy5aZPnsp\ - B8WvsgY99ZuNHDnwkrqe6Lx7ARVmercwugR5HwDcLA9ivKMypk9PNucDPLs67TXWy6k9R\ - ozKmy"; + // bytes of a serialized merkle data shred + const PAYLOAD: &str = "HV7qJBe3jCM8aRd4HAXJnJzyDvNYDYsPjjaaK2tTFTxJU2Qj\ + 7i87e45TzCg2Vv4rrcBznfs8212svH8aXsM2WYDPst43KyDz99FesBZ8aasxhkUgHGg3Smc\ + Pa7opSARcYBpQAG2UHRYFmoPsj3hXADsX5C8JBM3jyHLtbQ78CH11J2dh7J4ps8JxCcUsq7\ + 
E7PVs7NgFku54c8gBuhBuAMykvvSyhGRjyXCL17feubvA8WQyMJz27eXk8hE6LGs98ucsV9\ + pScMuXVbAL4rT2cW9gN77QP4mBohJ8miWbMYbi7eLxiXJ9nA6i7XZd32GKscf3Ln8PK7NFT\ + JpKxinptw2vd3MuVQRQjNuyZLTEjJSiaxoR1mhSKet27PCTkPWRAxPfMCvLNY2mtdixnFfk\ + BdKj1nrwHcQYqNnKHjg3axRhx58QQ7VX5LSNGRtLByZRFMq9wCy7zz7HiMKvMZzddqixHcx\ + EFeaM6YFh2sfmW3AKz4pS9s6XL3mAD9MtFvRJUXupNK9P3XtA7BGU8Z8AyYnVi4wxfaWoSd\ + 22nhDmMuHTRNHgUDxCEPHfE5enuR3PG4q1DGpVyV9rM6678qFG3cUKdTbJw85uFAkedEhAn\ + RsU5u7H2DUTMrZ3AsLGuRmRoLZdxcFGe9GJmWY3WYEBJsdrJUVTnQVDsgvcc7HiFCoGBmGG\ + KPS19ncA7Ynie1iokXCjwZNaQf3fpUMksGzkqjfYiFfGQDMNTsPUdcHFAae5Nmh3p8bduG6\ + TNHn3A4LoSMX8wwM6Sn4XL3fFLHrkWTV8CJfg67AoqpzgGiWQNdeK2HRABGRbamUxXSzWhA\ + tJ7yEPrh9tKTX851mpPjrFHxpu68xDL3t5nd18mtALJ5n5gmTsXMwUxpt9GfGrZyHXteZfH\ + jaMLmSqvmDAFH1xADU2SZucRDbAWsuaWUMwaPXCNtiozgJ6uRnyxmwBhikChgtxDSZmRALA\ + uYwEnCE8uj4NWvryeVpzfy6m9tqYutCkRsbNodaGmBZ2KGtg4iaQjj4iCK9jKKAxpYVnxbr\ + n2jtFezUTsR9dqFh94c9Aa4NdPgkr19hcqabqBZyzan3xP3Jvs78Z5uqkSUVtXP3t3b5ozd\ + qjYRMkgwsgGpNqcmBLGANiPXrQ8SseoNsCs3Xtv6Vf7oGg1St3teXSrtWMbsouK3uF8DPgn\ + S14yUtrs8cyXX53QMCEuY1wKcoQZwSWqL86FTvZUA3vT6SYvjVKYxAXwNP1ouKdwtePfwdH\ + wdM37RT9SMht4BLEXutSCcBys1K3pTWWwqGB87A26apuG1TiqeEugv1FrjprEKyt84S3FgM\ + 5eJfdN5NDArvw9bBR81UoYmyZgX5pEY6gNg2xw28Gd7gH9TVe5Y31iggni2oJ3GuBj3R7Ma\ + umdo8rE1S4tBWVGsXikv9KFDtMmT2sMmeFuAwDbUZEGsBCAfK7EQpKcYSv7KajtgLpqi6JW\ + RP8nBR7FkKR9qv5khhiBLuRzfSxwtADXZknFRU6bh8Ba6JmLhjhqkCYETizXmZVrjvy4gLZ\ + we5YHZW5uhbthAzfcLRxuTxnPbyW7LehgwkYde64b8PzqYepYUtxqFHuJSwddis1VuoA2Lb\ + M3SeyTe262Q7gUiPEjwQdRXKwUAgGrxVu"; let mut rng = { - let seed = <[u8; 32]>::try_from(bs58_decode(SEED)).unwrap(); + let seed = [1u8; 32]; ChaChaRng::from_seed(seed) }; let mut seed = [0u8; Keypair::SECRET_KEY_LENGTH]; rng.fill(&mut seed[..]); let keypair = keypair_from_seed(&seed).unwrap(); - let mut shred = Shred::new_from_data( - 142076266, // slot - 21443, // index - 51279, // parent_offset - &[], // data - ShredFlags::DATA_COMPLETE_SHRED, - 49, // reference_tick - 59445, // version - 
21414, // fec_set_index - ); + let mut shred = Shredder::single_shred_for_tests(142076266, &keypair); shred.sign(&keypair); assert!(shred.verify(&keypair.pubkey())); assert_matches!(shred.sanitize(), Ok(())); - let payload = bs58_decode(PAYLOAD); + let payload = bs58::decode(PAYLOAD).into_vec().unwrap(); let mut packet = Packet::default(); packet.buffer_mut()[..payload.len()].copy_from_slice(&payload); packet.meta_mut().size = payload.len(); assert_eq!(shred.bytes_to_store(), payload); - assert_eq!(shred, Shred::new_from_serialized_shred(payload).unwrap()); + assert_eq!( + shred, + Shred::new_from_serialized_shred(payload.to_vec()).unwrap() + ); verify_shred_layout(&shred, &packet); } @@ -1704,41 +1933,41 @@ mod tests { assert!(flags.contains(ShredFlags::LAST_SHRED_IN_SLOT)); } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_is_shred_duplicate(chained: bool, is_last_in_slot: bool) { + #[test_case(true)] + #[test_case(false)] + fn test_is_shred_duplicate(is_last_in_slot: bool) { fn fill_retransmitter_signature( rng: &mut R, shred: Shred, - chained: bool, is_last_in_slot: bool, ) -> Shred { let mut shred = shred.into_payload(); let mut signature = [0u8; SIGNATURE_BYTES]; rng.fill(&mut signature[..]); - let out = layout::set_retransmitter_signature(&mut shred, &Signature::from(signature)); - if chained && is_last_in_slot { + let out = layout::set_retransmitter_signature( + &mut shred.as_mut(), + &Signature::from(signature), + ); + if is_last_in_slot { assert_matches!(out, Ok(())); } else { assert_matches!(out, Err(Error::InvalidShredVariant)); } Shred::new_from_serialized_shred(shred).unwrap() } + let mut rng = rand::thread_rng(); let slot = 285_376_049 + rng.gen_range(0..100_000); let shreds: Vec<_> = make_merkle_shreds_for_tests( &mut rng, slot, 1200 * 5, // data_size - chained, is_last_in_slot, ) .unwrap() .into_iter() .map(Shred::from) - .map(|shred| fill_retransmitter_signature(&mut rng, shred, chained, is_last_in_slot)) + .map(|shred| 
fill_retransmitter_signature(&mut rng, shred, is_last_in_slot)) .collect(); { let num_data_shreds = shreds.iter().filter(|shred| shred.is_data()).count(); @@ -1755,9 +1984,8 @@ mod tests { } // Different retransmitter signature does not make shreds duplicate. for shred in &shreds { - let other = - fill_retransmitter_signature(&mut rng, shred.clone(), chained, is_last_in_slot); - if chained && is_last_in_slot { + let other = fill_retransmitter_signature(&mut rng, shred.clone(), is_last_in_slot); + if is_last_in_slot { assert_ne!(shred.payload(), other.payload()); } assert!(!shred.is_shred_duplicate(&other)); @@ -1767,7 +1995,7 @@ mod tests { // (ignoring retransmitter signature) are duplicate. for shred in &shreds { let mut other = shred.payload().clone(); - other[90] = other[90].wrapping_add(1); + other.as_mut()[90] = other[90].wrapping_add(1); let other = Shred::new_from_serialized_shred(other).unwrap(); assert_ne!(shred.payload(), other.payload()); assert_eq!( diff --git a/ledger/src/shred/common.rs b/ledger/src/shred/common.rs index 769faa484b1323..4a4e26c4b93326 100644 --- a/ledger/src/shred/common.rs +++ b/ledger/src/shred/common.rs @@ -3,7 +3,6 @@ macro_rules! dispatch { #[inline] $vis fn $name(&self $(, $arg:$ty)?) $(-> $out)? { match self { - Self::Legacy(shred) => shred.$name($($arg, )?), Self::Merkle(shred) => shred.$name($($arg, )?), } } @@ -12,7 +11,6 @@ macro_rules! dispatch { #[inline] $vis fn $name(self $(, $arg:$ty)?) $(-> $out)? { match self { - Self::Legacy(shred) => shred.$name($($arg, )?), Self::Merkle(shred) => shred.$name($($arg, )?), } } @@ -21,7 +19,6 @@ macro_rules! dispatch { #[inline] $vis fn $name(&mut self $(, $arg:$ty)?) $(-> $out)? { match self { - Self::Legacy(shred) => shred.$name($($arg, )?), Self::Merkle(shred) => shred.$name($($arg, )?), } } @@ -47,7 +44,7 @@ macro_rules! 
impl_shred_common { #[inline] fn set_signature(&mut self, signature: Signature) { - self.payload[..SIZE_OF_SIGNATURE].copy_from_slice(signature.as_ref()); + self.payload.as_mut()[..SIZE_OF_SIGNATURE].copy_from_slice(signature.as_ref()); self.common_header.signature = signature; } }; diff --git a/ledger/src/shred/legacy.rs b/ledger/src/shred/legacy.rs deleted file mode 100644 index e2d20ae0327364..00000000000000 --- a/ledger/src/shred/legacy.rs +++ /dev/null @@ -1,271 +0,0 @@ -use { - crate::shred::{ - self, - common::impl_shred_common, - payload::Payload, - shred_code, shred_data, - traits::{Shred, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait}, - CodingShredHeader, DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredVariant, - SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_DATA_SHRED_HEADERS, SIZE_OF_SIGNATURE, - }, - assert_matches::debug_assert_matches, - solana_clock::Slot, - solana_perf::packet::deserialize_from_with_limit, - solana_signature::Signature, - static_assertions::const_assert_eq, - std::{io::Cursor, ops::Range}, -}; - -// All payload including any zero paddings are signed. -// Code and data shreds have the same payload size. -pub(super) const SIGNED_MESSAGE_OFFSETS: Range = - SIZE_OF_SIGNATURE..ShredData::SIZE_OF_PAYLOAD; -const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, ShredCode::SIZE_OF_PAYLOAD); -const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, 1228); -const_assert_eq!(ShredData::CAPACITY, 1051); - -// ShredCode::SIZE_OF_HEADERS bytes at the end of data shreds -// is never used and is not part of erasure coding. -const_assert_eq!(SIZE_OF_ERASURE_ENCODED_SLICE, 1139); -pub(super) const SIZE_OF_ERASURE_ENCODED_SLICE: usize = - ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS; - -// Layout: {common, data} headers | data | zero padding -// Everything up to ShredCode::SIZE_OF_HEADERS bytes at the end (which is part -// of zero padding) is erasure coded. -// All payload past signature, including the entirety of zero paddings, is -// signed. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub struct ShredData { - common_header: ShredCommonHeader, - data_header: DataShredHeader, - payload: Payload, -} - -// Layout: {common, coding} headers | erasure coded shard -// All payload past signature is singed. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct ShredCode { - common_header: ShredCommonHeader, - coding_header: CodingShredHeader, - payload: Payload, -} - -impl<'a> Shred<'a> for ShredData { - type SignedData = &'a [u8]; - - impl_shred_common!(); - // Legacy data shreds are always zero padded and - // the same size as coding shreds. - const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; - const SIZE_OF_HEADERS: usize = SIZE_OF_DATA_SHRED_HEADERS; - - fn from_payload(payload: T) -> Result - where - Payload: From, - { - let mut payload = Payload::from(payload); - let mut cursor = Cursor::new(&payload[..]); - let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - if common_header.shred_variant != ShredVariant::LegacyData { - return Err(Error::InvalidShredVariant); - } - let data_header = deserialize_from_with_limit(&mut cursor)?; - // Shreds stored to blockstore may have trailing zeros trimmed. 
- // Repair packets have nonce at the end of packet payload; see: - // https://github.com/solana-labs/solana/pull/10109 - // https://github.com/solana-labs/solana/pull/16602 - if payload.len() < Self::SIZE_OF_HEADERS { - return Err(Error::InvalidPayloadSize(payload.len())); - } - payload.resize(Self::SIZE_OF_PAYLOAD, 0u8); - let shred = Self { - common_header, - data_header, - payload, - }; - shred.sanitize().map(|_| shred) - } - - fn erasure_shard_index(&self) -> Result { - shred_data::erasure_shard_index(self).ok_or_else(|| { - let headers = Box::new((self.common_header, self.data_header)); - Error::InvalidErasureShardIndex(headers) - }) - } - - fn erasure_shard(&self) -> Result<&[u8], Error> { - if self.payload.len() != Self::SIZE_OF_PAYLOAD { - return Err(Error::InvalidPayloadSize(self.payload.len())); - } - Ok(&self.payload[..SIZE_OF_ERASURE_ENCODED_SLICE]) - } - - fn sanitize(&self) -> Result<(), Error> { - match self.common_header.shred_variant { - ShredVariant::LegacyData => (), - _ => return Err(Error::InvalidShredVariant), - } - shred_data::sanitize(self) - } - - fn signed_data(&'a self) -> Result { - debug_assert_eq!(self.payload.len(), Self::SIZE_OF_PAYLOAD); - Ok(&self.payload[SIZE_OF_SIGNATURE..]) - } -} - -impl<'a> Shred<'a> for ShredCode { - type SignedData = &'a [u8]; - - impl_shred_common!(); - const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; - const SIZE_OF_HEADERS: usize = SIZE_OF_CODING_SHRED_HEADERS; - - fn from_payload(payload: T) -> Result - where - Payload: From, - { - let mut payload = Payload::from(payload); - let mut cursor = Cursor::new(&payload[..]); - let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; - if common_header.shred_variant != ShredVariant::LegacyCode { - return Err(Error::InvalidShredVariant); - } - let coding_header = deserialize_from_with_limit(&mut cursor)?; - // Repair packets have nonce at the end of packet payload: - // 
https://github.com/solana-labs/solana/pull/10109 - payload.truncate(Self::SIZE_OF_PAYLOAD); - let shred = Self { - common_header, - coding_header, - payload, - }; - shred.sanitize().map(|_| shred) - } - - fn erasure_shard_index(&self) -> Result { - shred_code::erasure_shard_index(self).ok_or_else(|| { - let headers = Box::new((self.common_header, self.coding_header)); - Error::InvalidErasureShardIndex(headers) - }) - } - - fn erasure_shard(&self) -> Result<&[u8], Error> { - if self.payload.len() != Self::SIZE_OF_PAYLOAD { - return Err(Error::InvalidPayloadSize(self.payload.len())); - } - Ok(&self.payload[Self::SIZE_OF_HEADERS..]) - } - - fn sanitize(&self) -> Result<(), Error> { - match self.common_header.shred_variant { - ShredVariant::LegacyCode => (), - _ => return Err(Error::InvalidShredVariant), - } - shred_code::sanitize(self) - } - - fn signed_data(&'a self) -> Result { - debug_assert_eq!(self.payload.len(), Self::SIZE_OF_PAYLOAD); - Ok(&self.payload[SIZE_OF_SIGNATURE..]) - } -} - -impl ShredDataTrait for ShredData { - #[inline] - fn data_header(&self) -> &DataShredHeader { - &self.data_header - } - - #[inline] - fn data(&self) -> Result<&[u8], Error> { - Self::get_data(&self.payload, self.data_header.size) - } -} - -impl ShredCodeTrait for ShredCode { - #[inline] - fn coding_header(&self) -> &CodingShredHeader { - &self.coding_header - } -} - -impl ShredData { - // Maximum size of ledger data that can be embedded in a data-shred. 
- pub(super) const CAPACITY: usize = - Self::SIZE_OF_PAYLOAD - Self::SIZE_OF_HEADERS - ShredCode::SIZE_OF_HEADERS; - - pub(super) fn new_from_data( - slot: Slot, - index: u32, - parent_offset: u16, - data: &[u8], - flags: ShredFlags, - reference_tick: u8, - version: u16, - fec_set_index: u32, - ) -> Self { - let mut payload = vec![0; Self::SIZE_OF_PAYLOAD]; - let common_header = ShredCommonHeader { - signature: Signature::default(), - shred_variant: ShredVariant::LegacyData, - slot, - index, - version, - fec_set_index, - }; - let size = (data.len() + Self::SIZE_OF_HEADERS) as u16; - let flags = flags | ShredFlags::from_reference_tick(reference_tick); - let data_header = DataShredHeader { - parent_offset, - flags, - size, - }; - let mut cursor = Cursor::new(&mut payload[..]); - bincode::serialize_into(&mut cursor, &common_header).unwrap(); - bincode::serialize_into(&mut cursor, &data_header).unwrap(); - // TODO: Need to check if data is too large! - let offset = cursor.position() as usize; - debug_assert_eq!(offset, Self::SIZE_OF_HEADERS); - payload[offset..offset + data.len()].copy_from_slice(data); - Self { - common_header, - data_header, - payload: Payload::from(payload), - } - } - - // Given shred payload and DataShredHeader.size, returns the slice storing - // ledger entries in the shred. - pub(super) fn get_data(shred: &[u8], size: u16) -> Result<&[u8], Error> { - debug_assert_matches!( - shred::layout::get_shred_variant(shred), - Ok(ShredVariant::LegacyData) - ); - let size = usize::from(size); - (Self::SIZE_OF_HEADERS..=Self::SIZE_OF_HEADERS + Self::CAPACITY) - .contains(&size) - .then(|| shred.get(Self::SIZE_OF_HEADERS..size)) - .flatten() - .ok_or_else(|| Error::InvalidDataSize { - size: size as u16, - payload: shred.len(), - }) - } - - pub(super) fn bytes_to_store(&self) -> &[u8] { - // Payload will be padded out to Self::SIZE_OF_PAYLOAD. - // But only need to store the bytes within data_header.size. 
- &self.payload[..self.data_header.size as usize] - } - - pub(super) fn resize_stored_shred(mut shred: Vec) -> Result, Error> { - // Old shreds might have been extra zero padded. - if !(Self::SIZE_OF_HEADERS..=Self::SIZE_OF_PAYLOAD).contains(&shred.len()) { - return Err(Error::InvalidPayloadSize(shred.len())); - } - shred.resize(Self::SIZE_OF_PAYLOAD, 0u8); - Ok(shred) - } -} diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index 6d5743469c6440..a98e1e84aa3cc4 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -7,7 +7,7 @@ use { common::impl_shred_common, dispatch, merkle_tree::*, - payload::Payload, + payload::{Payload, PayloadMutGuard}, shred_code, shred_data, traits::{ Shred as ShredTrait, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait, @@ -77,7 +77,7 @@ pub(crate) enum Shred { impl Shred { dispatch!(fn erasure_shard_index(&self) -> Result); - dispatch!(fn erasure_shard_mut(&mut self) -> Result<&mut [u8], Error>); + dispatch!(fn erasure_shard_mut(&mut self) -> Result>, Error>); dispatch!(fn merkle_node(&self) -> Result); dispatch!(fn sanitize(&self) -> Result<(), Error>); dispatch!(fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error>); @@ -129,7 +129,6 @@ impl Shred { Payload: From, { match shred::layout::get_shred_variant(shred.as_ref())? { - ShredVariant::LegacyCode | ShredVariant::LegacyData => Err(Error::InvalidShredVariant), ShredVariant::MerkleCode { .. } => Ok(Self::ShredCode(ShredCode::from_payload(shred)?)), ShredVariant::MerkleData { .. } => Ok(Self::ShredData(ShredData::from_payload(shred)?)), } @@ -197,9 +196,7 @@ impl ShredData { ); // Shred index in the erasure batch. let index = { - let fec_set_index = <[u8; 4]>::try_from(shred.get(79..83)?) - .map(u32::from_le_bytes) - .ok()?; + let fec_set_index = shred::layout::get_fec_set_index(shred)?; shred::layout::get_index(shred)? .checked_sub(fec_set_index) .map(usize::try_from)? @@ -362,7 +359,8 @@ macro_rules! 
impl_merkle_shred { fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> { let offset = self.chained_merkle_root_offset()?; - let Some(buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) else { + let Some(mut buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) + else { return Err(Error::InvalidPayloadSize(self.payload.len())); }; buffer.copy_from_slice(chained_merkle_root.as_ref()); @@ -395,11 +393,11 @@ macro_rules! impl_merkle_shred { { let proof_size = self.proof_size()?; let proof_offset = self.proof_offset()?; - let mut cursor = Cursor::new( - self.payload - .get_mut(proof_offset..) - .ok_or(Error::InvalidProofSize(proof_size))?, - ); + let mut slice = self + .payload + .get_mut(proof_offset..) + .ok_or(Error::InvalidProofSize(proof_size))?; + let mut cursor = Cursor::new(slice.as_mut()); let proof_size = usize::from(proof_size); proof.into_iter().enumerate().try_for_each(|(k, entry)| { if k >= proof_size { @@ -425,7 +423,7 @@ macro_rules! impl_merkle_shred { fn set_retransmitter_signature(&mut self, signature: &Signature) -> Result<(), Error> { let offset = self.retransmitter_signature_offset()?; - let Some(buffer) = self.payload.get_mut(offset..offset + SIZE_OF_SIGNATURE) else { + let Some(mut buffer) = self.payload.get_mut(offset..offset + SIZE_OF_SIGNATURE) else { return Err(Error::InvalidPayloadSize(self.payload.len())); }; buffer.copy_from_slice(signature.as_ref()); @@ -481,7 +479,7 @@ macro_rules! impl_merkle_shred { } // Returns the erasure coded slice as a mutable reference. - fn erasure_shard_mut(&mut self) -> Result<&mut [u8], Error> { + fn erasure_shard_mut(&mut self) -> Result>, Error> { let offsets = self.erasure_shard_offsets()?; let payload_size = self.payload.len(); self.payload @@ -711,7 +709,7 @@ pub(super) fn recover( chained, resigned, } => (proof_size, chained, resigned), - ShredVariant::MerkleData { .. 
} | ShredVariant::LegacyCode | ShredVariant::LegacyData => { + ShredVariant::MerkleData { .. } => { return Err(Error::InvalidShredVariant); } }; @@ -800,14 +798,16 @@ pub(super) fn recover( batch }; // Obtain erasure encoded shards from the shreds and reconstruct shreds. - let mut shards: Vec<(&mut [u8], bool)> = shreds + let mut shards = shreds .iter_mut() .zip(&mask) .map(|(shred, &mask)| Ok((shred.erasure_shard_mut()?, mask))) - .collect::>()?; + .collect::, Error>>()?; reed_solomon_cache .get(num_data_shreds, num_coding_shreds)? .reconstruct(&mut shards)?; + // Drop the mut guards to allow further mutation below. + drop(shards); // Verify and sanitize recovered shreds, re-compute the Merkle tree and set // the merkle proof on the recovered shreds. let nodes = shreds @@ -955,7 +955,7 @@ fn make_stub_shred( // while their payload is sent to retransmit-stage. Using a shared // payload between the two concurrent paths will reduce allocations // and memcopies. - payload: Payload::from(std::sync::Arc::new(payload)), + payload: Payload::from(payload), }) }; if let Some(chained_merkle_root) = chained_merkle_root { @@ -1024,7 +1024,7 @@ pub(crate) fn make_shreds_from_data( keypair: &Keypair, // The Merkle root of the previous erasure batch if chained. 
chained_merkle_root: Option, - mut data: &[u8], // Serialized &[Entry] + data: &[u8], // Serialized &[Entry] slot: Slot, parent_slot: Slot, shred_version: u16, @@ -1037,18 +1037,31 @@ pub(crate) fn make_shreds_from_data( ) -> Result, Error> { let now = Instant::now(); let chained = chained_merkle_root.is_some(); - let resigned = chained && is_last_in_slot; + + // only sign if last fec set in slot and is chained + let sign_last_fec_set = chained && is_last_in_slot; let proof_size = PROOF_ENTRIES_FOR_32_32_BATCH; - let data_buffer_per_shred_size = ShredData::capacity(proof_size, chained, resigned)?; + + // unsigned data_buffer size + let data_buffer_per_shred_size = ShredData::capacity(proof_size, chained, false)?; let data_buffer_total_size = DATA_SHREDS_PER_FEC_BLOCK * data_buffer_per_shred_size; + // signed data_buffer size + let data_buffer_per_shred_size_signed = if sign_last_fec_set { + ShredData::capacity(proof_size, chained, true)? + } else { + 0 + }; + let data_buffer_total_size_signed = + DATA_SHREDS_PER_FEC_BLOCK * data_buffer_per_shred_size_signed; + // Common header for the data shreds. let mut common_header_data = ShredCommonHeader { signature: Signature::default(), shred_variant: ShredVariant::MerkleData { proof_size, chained, - resigned, + resigned: false, }, slot, index: next_shred_index, @@ -1061,7 +1074,7 @@ pub(crate) fn make_shreds_from_data( shred_variant: ShredVariant::MerkleCode { proof_size, chained, - resigned, + resigned: false, }, index: next_code_index, ..common_header_data @@ -1081,18 +1094,34 @@ pub(crate) fn make_shreds_from_data( } }; - // Pre-allocate shreds to avoid reallocations. - let mut shreds = { - let number_of_batches = data.len().div_ceil(data_buffer_total_size); - let total_num_shreds = SHREDS_PER_FEC_BLOCK * number_of_batches; - Vec::::with_capacity(total_num_shreds) + let (mut unsigned_data, signed_data) = if sign_last_fec_set { + // Reserve at least one signed batch (may be empty) at the end. 
+ if data.len() > data_buffer_total_size_signed { + // sign everything except the last batch + let split_at = data.len() - data_buffer_total_size_signed; + data.split_at(split_at) + } else { + // only enough data for one fec set, sign the whole thing + (&[][..], data) + } + } else { + // not last fec set, so don't sign + (data, &[][..]) }; - stats.data_bytes += data.len(); + stats.data_bytes += unsigned_data.len() + signed_data.len(); + + let unsigned_sets = unsigned_data.len().div_ceil(data_buffer_total_size); + let number_of_fec_sets = if sign_last_fec_set { + unsigned_sets + 1 + } else { + unsigned_sets + }; + let mut shreds = Vec::::with_capacity(SHREDS_PER_FEC_BLOCK * number_of_fec_sets); // Split the data into full erasure batches and initialize data and coding // shreds for each batch. - while data.len() >= data_buffer_total_size { - let (current_batch_data_chunk, rest) = data.split_at(data_buffer_total_size); + while unsigned_data.len() >= data_buffer_total_size { + let (current_batch_data_chunk, rest) = unsigned_data.split_at(data_buffer_total_size); debug_assert_eq!( current_batch_data_chunk.len(), DATA_SHREDS_PER_FEC_BLOCK * data_buffer_per_shred_size @@ -1108,7 +1137,7 @@ pub(crate) fn make_shreds_from_data( .map(Shred::ShredData), ); shreds.extend(make_shreds_code_header_only(&mut common_header_code).map(Shred::ShredCode)); - data = rest; + unsigned_data = rest; } // Two possibilities for taking this conditional: @@ -1118,29 +1147,33 @@ pub(crate) fn make_shreds_from_data( // 2.) Shreds is_empty, which only happens when we entered w/ zero data. // // In either case, we want to generate empty data shreds. 
- if !data.is_empty() || shreds.is_empty() { - stats.padding_bytes += data_buffer_total_size - data.len(); - common_header_data.shred_variant = ShredVariant::MerkleData { + if !unsigned_data.is_empty() || (shreds.is_empty() && !sign_last_fec_set) { + stats.padding_bytes += data_buffer_total_size - unsigned_data.len(); + shred_leftover_data( proof_size, chained, - resigned, - }; - common_header_code.shred_variant = ShredVariant::MerkleCode { + false, + unsigned_data, + data_buffer_per_shred_size, + &mut common_header_data, + &mut common_header_code, + data_header, + &mut shreds, + ); + } + if !signed_data.is_empty() || (shreds.is_empty() && sign_last_fec_set) { + stats.padding_bytes += data_buffer_total_size_signed - signed_data.len(); + shred_leftover_data( proof_size, chained, - resigned, - }; - common_header_data.fec_set_index = common_header_data.index; - common_header_code.fec_set_index = common_header_data.fec_set_index; - shreds.extend({ - // Create data chunks out of remaining data + padding. - let chunks = data - .chunks(data_buffer_per_shred_size) - .chain(std::iter::repeat(&[][..])) // possible padding - .take(DATA_SHREDS_PER_FEC_BLOCK); - make_shreds_data(&mut common_header_data, data_header, chunks).map(Shred::ShredData) - }); - shreds.extend(make_shreds_code_header_only(&mut common_header_code).map(Shred::ShredCode)); + true, + signed_data, + data_buffer_per_shred_size_signed, + &mut common_header_data, + &mut common_header_code, + data_header, + &mut shreds, + ); } // Adjust flags for the very last data shred. 
@@ -1210,6 +1243,41 @@ pub(crate) fn make_shreds_from_data( Ok(shreds) } +#[allow(clippy::too_many_arguments)] +fn shred_leftover_data( + proof_size: u8, + chained: bool, + resigned: bool, + data: &[u8], + data_buffer_per_shred_size: usize, + common_header_data: &mut ShredCommonHeader, + common_header_code: &mut ShredCommonHeader, + data_header: DataShredHeader, + shreds: &mut Vec, +) { + common_header_data.shred_variant = ShredVariant::MerkleData { + proof_size, + chained, + resigned, + }; + common_header_code.shred_variant = ShredVariant::MerkleCode { + proof_size, + chained, + resigned, + }; + common_header_data.fec_set_index = common_header_data.index; + common_header_code.fec_set_index = common_header_data.fec_set_index; + shreds.extend({ + // Create data chunks out of remaining data + padding. + let chunks = data + .chunks(data_buffer_per_shred_size) + .chain(std::iter::repeat(&[][..])) // possible padding + .take(DATA_SHREDS_PER_FEC_BLOCK); + make_shreds_data(common_header_data, data_header, chunks).map(Shred::ShredData) + }); + shreds.extend(make_shreds_code_header_only(common_header_code).map(Shred::ShredCode)); +} + // Given shreds of the same erasure batch: // - Writes common and {data,coding} headers into shreds' payload. // - Fills in erasure code buffers in the coding shreds. @@ -1231,11 +1299,11 @@ fn finish_erasure_batch( fn write_headers(shred: &mut Shred) -> Result<(), bincode::Error> { match shred { Shred::ShredCode(shred) => bincode::serialize_into( - &mut shred.payload[..], + &mut shred.payload.as_mut()[..], &(&shred.common_header, &shred.coding_header), ), Shred::ShredData(shred) => bincode::serialize_into( - &mut shred.payload[..], + &mut shred.payload.as_mut()[..], &(&shred.common_header, &shred.data_header), ), } @@ -1267,7 +1335,7 @@ fn finish_erasure_batch( shreds .iter_mut() .map(Shred::erasure_shard_mut) - .collect::, _>>()?, + .collect::, _>>()?, )?; // Set the chained_merkle_root for each shred. 
if let Some(chained_merkle_root) = chained_merkle_root { @@ -1311,7 +1379,7 @@ fn finish_erasure_batch( mod test { use { super::*, - crate::shred::{merkle_tree::get_proof_size, ShredFlags, ShredId, SignedData}, + crate::shred::{merkle_tree::get_proof_size, ShredFlags, ShredId}, assert_matches::assert_matches, itertools::Itertools, rand::{seq::SliceRandom, CryptoRng, Rng}, @@ -1587,14 +1655,14 @@ mod test { }) { assert_matches!( recover(shreds, reed_solomon_cache).err(), - Some(Error::ErasureError(TooFewParityShards)) + Some(Error::Erasure(TooFewParityShards)) ); continue; } if shreds.len() < num_data_shreds { assert_matches!( recover(shreds, reed_solomon_cache).err(), - Some(Error::ErasureError(TooFewShardsPresent)) + Some(Error::Erasure(TooFewShardsPresent)) ); continue; } @@ -1614,12 +1682,6 @@ mod test { } }); assert_eq!(recovered_shreds, removed_shreds); - for shred in recovered_shreds { - match shred.shred_type() { - ShredType::Code => assert_matches!(shred.payload(), Payload::Unique(_)), - ShredType::Data => assert_matches!(shred.payload(), Payload::Shared(_)), - } - } } } @@ -1691,7 +1753,11 @@ mod test { let thread_pool = ThreadPoolBuilder::new().num_threads(2).build().unwrap(); let keypair = Keypair::new(); let chained_merkle_root = chained.then(|| Hash::new_from_array(rng.gen())); - let resigned = chained && is_last_in_slot; + + // only sign last batch if it is chained and is the last in slot + // let resigned = chained && is_last_in_slot; + let sign_last_fec_set = chained && is_last_in_slot; + let slot = 149_745_689; let parent_slot = slot - rng.gen_range(1..65536); let shred_version = rng.gen(); @@ -1724,14 +1790,12 @@ mod test { }) .collect(); // Assert that the input data can be recovered from data shreds. 
- assert_eq!( - data, - data_shreds - .iter() - .flat_map(|shred| shred.data().unwrap()) - .copied() - .collect::>() - ); + let data2 = data_shreds + .iter() + .flat_map(|shred| shred.data().unwrap()) + .copied() + .collect::>(); + assert_eq!(data, data2); // Assert that shreds sanitize and verify. let pubkey = keypair.pubkey(); for shred in &shreds { @@ -1772,16 +1836,17 @@ mod test { shred::layout::get_chained_merkle_root(shred), chained_merkle_root ); - assert_eq!(shred::layout::get_signed_data_offsets(shred), None); let data = shred::layout::get_signed_data(shred).unwrap(); - assert_eq!(data, SignedData::MerkleRoot(merkle_root)); + assert_eq!(data, merkle_root); assert!(signature.verify(pubkey.as_ref(), data.as_ref())); } // Verify common, data and coding headers. let mut num_data_shreds = 0; let mut num_coding_shreds = 0; - for shred in &shreds { + for (index, shred) in shreds.iter().enumerate() { let common_header = shred.common_header(); + let resigned = sign_last_fec_set && index >= shreds.len() - 64; + assert_eq!(common_header.slot, slot); assert_eq!(common_header.version, shred_version); let proof_size = shred.proof_size().unwrap(); @@ -1914,9 +1979,7 @@ mod test { }) .collect(); assert_eq!(recovered_data_shreds.len(), data_shreds.len()); - for shred in &recovered_data_shreds { - assert_matches!(shred.payload(), Payload::Shared(_)); - } + for (shred, other) in recovered_data_shreds.into_iter().zip(data_shreds) { match shred { Shred::ShredCode(_) => panic!("Invalid shred type!"), diff --git a/ledger/src/shred/payload.rs b/ledger/src/shred/payload.rs index a096ff90ec5fe3..00941dded39030 100644 --- a/ledger/src/shred/payload.rs +++ b/ledger/src/shred/payload.rs @@ -1,71 +1,66 @@ -use std::{ - ops::{Deref, DerefMut}, - sync::Arc, -}; #[cfg(any(test, feature = "dev-context-only-utils"))] use { crate::shred::Nonce, - solana_perf::packet::{ - bytes::{BufMut, BytesMut}, - BytesPacket, Meta, Packet, + solana_perf::packet::{bytes::BufMut, BytesPacket, Meta, 
Packet}, +}; +use { + bytes::{Bytes, BytesMut}, + std::{ + mem, + ops::{Bound, Deref, DerefMut, RangeBounds, RangeFull}, + slice::SliceIndex, }, - std::mem, }; #[derive(Clone, Debug, Eq)] -pub enum Payload { - Shared(Arc>), - Unique(Vec), -} - -macro_rules! make_mut { - ($self:ident) => { - match $self { - Self::Shared(bytes) => Arc::make_mut(bytes), - Self::Unique(bytes) => bytes, - } - }; +pub struct Payload { + pub bytes: Bytes, } -macro_rules! dispatch { - ($vis:vis fn $name:ident(&self $(, $arg:ident : $ty:ty)?) $(-> $out:ty)?) => { - #[inline] - $vis fn $name(&self $(, $arg:$ty)?) $(-> $out)? { - match self { - Self::Shared(bytes) => bytes.$name($($arg, )?), - Self::Unique(bytes) => bytes.$name($($arg, )?), - } - } - }; - ($vis:vis fn $name:ident(&mut self $(, $arg:ident : $ty:ty)*) $(-> $out:ty)?) => { - #[inline] - $vis fn $name(&mut self $(, $arg:$ty)*) $(-> $out)? { - make_mut!(self).$name($($arg, )*) - } +impl Payload { + /// Convert the payload's inner [`Bytes`] into a [`BytesMut`], consuming the [`Payload`]. + /// + /// If the payload is unique (single reference), this will return a [`BytesMut`] with the + /// contents of the payload without copying. If the payload is not unique, this will make a copy + /// of the payload in a new [`BytesMut`]. As such, take care to avoid performing this conversion + /// if the payload is not unique. + #[inline] + pub fn into_bytes_mut(self) -> BytesMut { + self.bytes.into() } -} -impl Payload { + /// Get a mutable reference via [`PayloadMutGuard`] to the payload's _full_ inner bytes. + /// See [`Payload::get_mut`] for selecting a subset of the payload's inner bytes. + /// + /// If the payload is unique (single reference), this will not perform any copying. Otherwise it + /// will. As such, take care to avoid performing this conversion if the payload is not unique. 
#[inline] - pub(crate) fn resize(&mut self, size: usize, byte: u8) { - if self.len() != size { - make_mut!(self).resize(size, byte); - } + pub fn as_mut(&mut self) -> PayloadMutGuard<'_, RangeFull> { + PayloadMutGuard::new(self, ..) } #[inline] - pub(crate) fn truncate(&mut self, size: usize) { - if self.len() > size { - make_mut!(self).truncate(size); + /// Get a mutable reference via [`PayloadMutGuard`] to a subset of the payload's inner bytes. + /// + /// If the payload is unique (single reference), this will not perform any copying. Otherwise it + /// will. As such, take care to avoid performing this conversion if the payload is not unique. + pub fn get_mut(&mut self, index: I) -> Option> + where + I: RangeBounds, + { + match index.end_bound() { + Bound::Included(&end) if end >= self.bytes.len() => None, + Bound::Excluded(&end) if end > self.bytes.len() => None, + _ => Some(PayloadMutGuard::new(self, index)), } } + /// Shortens the buffer, keeping the first `len` bytes and dropping the rest. + /// + /// See [`Bytes::truncate`]. 
#[inline] - pub fn unwrap_or_clone(this: Self) -> Vec { - match this { - Self::Shared(bytes) => Arc::unwrap_or_clone(bytes), - Self::Unique(bytes) => bytes, - } + pub fn truncate(&mut self, len: usize) { + self.bytes.truncate(len); + } } } @@ -137,26 +132,147 @@ impl PartialEq for Payload { impl From> for Payload { #[inline] fn from(bytes: Vec) -> Self { - Self::Unique(bytes) + Self { + bytes: Bytes::from(bytes), + } + } +} + +impl From for Payload { + #[inline] + fn from(bytes: Bytes) -> Self { + Self { bytes } + } } -impl From>> for Payload { +impl From for Payload { #[inline] - fn from(bytes: Arc>) -> Self { - Self::Shared(bytes) + fn from(bytes: BytesMut) -> Self { + Self { + bytes: bytes.freeze(), + } } } impl AsRef<[u8]> for Payload { - dispatch!(fn as_ref(&self) -> &[u8]); + #[inline] + fn as_ref(&self) -> &[u8] { + self.bytes.as_ref() + } } impl Deref for Payload { type Target = [u8]; - dispatch!(fn deref(&self) -> &Self::Target); + + #[inline] + fn deref(&self) -> &Self::Target { + self.bytes.deref() + } +} + +/// Convenience wrapper around [`Payload`] and a [`BytesMut`] into that payload's bytes. +/// +/// [`Bytes`] is immutable, yet it's desirable to be able to "simulate" mutability for quick +/// inline updates when building shreds, especially to minimize code changes at the time of this +/// refactor. Given that references to shreds are not propagated until a shred is fully constructed, +/// we should not incur any copying overhead when using this guard to facilitate mutability during +/// shred construction. +/// +/// # How it works +/// +/// Upon construction, the guard converts the payload's [`Bytes`] into a [`BytesMut`], temporarily +/// replacing the payload's internal bytes reference with an empty [`Bytes`] (which does not +/// allocate). This will not perform any copying if the payload is unique (single reference). 
+/// +/// The guard will then provide a mutable reference to the bytes via [`DerefMut`] and [`AsMut`] +/// implementations, which forward indexing to the underlying [`BytesMut`]. +/// +/// The guard has a specialized [`Drop`] implementation that will write back the mutated bytes to the +/// payload, effectively "simulating" typical mutability semantics. +pub struct PayloadMutGuard<'a, I> { + payload: &'a mut Payload, + bytes_mut: BytesMut, + slice_index: I, } -impl DerefMut for Payload { - dispatch!(fn deref_mut(&mut self) -> &mut Self::Target); +impl<'a, I> PayloadMutGuard<'a, I> { + #[inline] + pub fn new(payload: &'a mut Payload, slice_index: I) -> Self { + let bytes_mut: BytesMut = mem::take(&mut payload.bytes).into(); + Self { + payload, + bytes_mut, + slice_index, + } + } +} + +impl Drop for PayloadMutGuard<'_, I> { + #[inline] + fn drop(&mut self) { + self.payload.bytes = mem::take(&mut self.bytes_mut).freeze(); + } +} + +impl Deref for PayloadMutGuard<'_, I> +where + I: SliceIndex<[u8]> + Clone, +{ + type Target = >::Output; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.bytes_mut[self.slice_index.clone()] + } +} + +impl DerefMut for PayloadMutGuard<'_, I> +where + I: SliceIndex<[u8]> + Clone, +{ + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.bytes_mut[self.slice_index.clone()] + } +} + +impl AsMut<[u8]> for PayloadMutGuard<'_, I> +where + I: SliceIndex<[u8], Output = [u8]> + Clone, +{ + #[inline] + fn as_mut(&mut self) -> &mut [u8] { + &mut self.bytes_mut[self.slice_index.clone()] + } +} + +impl AsRef<[u8]> for PayloadMutGuard<'_, I> +where + I: SliceIndex<[u8], Output = [u8]> + Clone, +{ + #[inline] + fn as_ref(&self) -> &[u8] { + &self.bytes_mut[self.slice_index.clone()] + } +} + +#[cfg(test)] +mod test { + use super::Payload; + + #[test] + fn test_guard_write_back() { + let mut payload = Payload::from(vec![1, 2, 3, 4, 5]); + { + let mut guard = payload.get_mut(..).unwrap(); + assert_eq!(guard[0], 1); + 
assert_eq!(guard[1], 2); + guard[0] = 10; + guard[1] = 20; + assert_eq!(guard[0], 10); + assert_eq!(guard[1], 20); + } + + assert_eq!(payload.bytes[..], vec![10, 20, 3, 4, 5]); + } } diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index 5f24af23dc6581..9123b37dae2d1f 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -1,11 +1,11 @@ use { crate::shred::{ common::dispatch, - legacy, merkle, + merkle, payload::Payload, traits::{Shred, ShredCode as ShredCodeTrait}, - CodingShredHeader, Error, ShredCommonHeader, ShredType, SignedData, - DATA_SHREDS_PER_FEC_BLOCK, MAX_DATA_SHREDS_PER_SLOT, SIZE_OF_NONCE, + CodingShredHeader, Error, ShredCommonHeader, ShredType, DATA_SHREDS_PER_FEC_BLOCK, + MAX_CODE_SHREDS_PER_SLOT, MAX_DATA_SHREDS_PER_SLOT, SIZE_OF_NONCE, }, solana_hash::Hash, solana_packet::PACKET_DATA_SIZE, @@ -13,14 +13,10 @@ use { static_assertions::const_assert_eq, }; -const_assert_eq!(MAX_CODE_SHREDS_PER_SLOT, 32_768); -pub const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT; - const_assert_eq!(ShredCode::SIZE_OF_PAYLOAD, 1228); #[derive(Clone, Debug, Eq, PartialEq)] pub enum ShredCode { - Legacy(legacy::ShredCode), Merkle(merkle::ShredCode), } @@ -37,23 +33,19 @@ impl ShredCode { #[cfg(any(test, feature = "dev-context-only-utils"))] dispatch!(pub(super) fn set_signature(&mut self, signature: Signature)); - pub(super) fn signed_data(&self) -> Result { - match self { - Self::Legacy(shred) => Ok(SignedData::Chunk(shred.signed_data()?)), - Self::Merkle(shred) => Ok(SignedData::MerkleRoot(shred.signed_data()?)), - } + pub(super) fn signed_data(&self) -> Result { + let Self::Merkle(shred) = self; + shred.signed_data() } pub(super) fn chained_merkle_root(&self) -> Result { match self { - Self::Legacy(_) => Err(Error::InvalidShredType), Self::Merkle(shred) => shred.chained_merkle_root(), } } pub(super) fn merkle_root(&self) -> Result { match self { - Self::Legacy(_) => Err(Error::InvalidShredType), 
Self::Merkle(shred) => shred.merkle_root(), } } @@ -69,9 +61,6 @@ impl ShredCode { // Returns true if the erasure coding of the two shreds mismatch. pub(super) fn erasure_mismatch(&self, other: &ShredCode) -> bool { match (self, other) { - (Self::Legacy(shred), Self::Legacy(other)) => erasure_mismatch(shred, other), - (Self::Legacy(_), Self::Merkle(_)) => true, - (Self::Merkle(_), Self::Legacy(_)) => true, (Self::Merkle(shred), Self::Merkle(other)) => { // Merkle shreds within the same erasure batch have the same // merkle root. The root of the merkle tree is signed. So @@ -84,18 +73,11 @@ impl ShredCode { pub(super) fn retransmitter_signature(&self) -> Result { match self { - Self::Legacy(_) => Err(Error::InvalidShredVariant), Self::Merkle(shred) => shred.retransmitter_signature(), } } } -impl From for ShredCode { - fn from(shred: legacy::ShredCode) -> Self { - Self::Legacy(shred) - } -} - impl From for ShredCode { fn from(shred: merkle::ShredCode) -> Self { Self::Merkle(shred) diff --git a/ledger/src/shred/shred_data.rs b/ledger/src/shred/shred_data.rs index 315218dfea3e2c..fae7990a779b3d 100644 --- a/ledger/src/shred/shred_data.rs +++ b/ledger/src/shred/shred_data.rs @@ -2,10 +2,10 @@ use { crate::shred::{ self, common::dispatch, - legacy, merkle, + merkle, payload::Payload, traits::{Shred as _, ShredData as ShredDataTrait}, - DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredType, ShredVariant, SignedData, + DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredType, ShredVariant, MAX_DATA_SHREDS_PER_SLOT, }, solana_clock::Slot, @@ -15,7 +15,6 @@ use { #[derive(Clone, Debug, Eq, PartialEq)] pub enum ShredData { - Legacy(legacy::ShredData), Merkle(merkle::ShredData), } @@ -30,49 +29,23 @@ impl ShredData { #[cfg(any(test, feature = "dev-context-only-utils"))] dispatch!(pub(super) fn set_signature(&mut self, signature: Signature)); - pub(super) fn signed_data(&self) -> Result { - match self { - Self::Legacy(shred) => 
Ok(SignedData::Chunk(shred.signed_data()?)), - Self::Merkle(shred) => Ok(SignedData::MerkleRoot(shred.signed_data()?)), - } + pub(super) fn signed_data(&self) -> Result { + let Self::Merkle(shred) = self; + shred.signed_data() } pub(super) fn chained_merkle_root(&self) -> Result { match self { - Self::Legacy(_) => Err(Error::InvalidShredType), Self::Merkle(shred) => shred.chained_merkle_root(), } } pub(super) fn merkle_root(&self) -> Result { match self { - Self::Legacy(_) => Err(Error::InvalidShredType), Self::Merkle(shred) => shred.merkle_root(), } } - pub(super) fn new_from_data( - slot: Slot, - index: u32, - parent_offset: u16, - data: &[u8], - flags: ShredFlags, - reference_tick: u8, - version: u16, - fec_set_index: u32, - ) -> Self { - Self::from(legacy::ShredData::new_from_data( - slot, - index, - parent_offset, - data, - flags, - reference_tick, - version, - fec_set_index, - )) - } - pub(super) fn last_in_slot(&self) -> bool { let flags = self.data_header().flags; flags.contains(ShredFlags::LAST_SHRED_IN_SLOT) @@ -92,7 +65,6 @@ impl ShredData { // Should only be used when storing shreds to blockstore. pub(super) fn bytes_to_store(&self) -> &[u8] { match self { - Self::Legacy(shred) => shred.bytes_to_store(), Self::Merkle(shred) => shred.payload(), } } @@ -100,16 +72,13 @@ impl ShredData { // Possibly zero pads bytes stored in blockstore. pub(crate) fn resize_stored_shred(shred: Vec) -> Result, Error> { match shred::layout::get_shred_variant(&shred)? { - ShredVariant::LegacyCode | ShredVariant::MerkleCode { .. } => { - Err(Error::InvalidShredType) - } + ShredVariant::MerkleCode { .. } => Err(Error::InvalidShredType), ShredVariant::MerkleData { .. 
} => { if shred.len() != merkle::ShredData::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(shred.len())); } Ok(shred) } - ShredVariant::LegacyData => legacy::ShredData::resize_stored_shred(shred), } } @@ -124,7 +93,7 @@ impl ShredData { )>, ) -> Result { match merkle_variant { - None => Ok(legacy::ShredData::CAPACITY), + None => Err(Error::InvalidShredVariant), Some((proof_size, chained, resigned)) => { debug_assert!(chained || !resigned); merkle::ShredData::capacity(proof_size, chained, resigned) @@ -134,18 +103,11 @@ impl ShredData { pub(super) fn retransmitter_signature(&self) -> Result { match self { - Self::Legacy(_) => Err(Error::InvalidShredVariant), Self::Merkle(shred) => shred.retransmitter_signature(), } } } -impl From for ShredData { - fn from(shred: legacy::ShredData) -> Self { - Self::Legacy(shred) - } -} - impl From for ShredData { fn from(shred: merkle::ShredData) -> Self { Self::Merkle(shred) diff --git a/ledger/src/shred/stats.rs b/ledger/src/shred/stats.rs index fe8a3f9047c77e..40a864d50b8610 100644 --- a/ledger/src/shred/stats.rs +++ b/ledger/src/shred/stats.rs @@ -58,6 +58,12 @@ pub struct ShredFetchStats { pub(super) bad_shred_type: usize, pub(super) shred_version_mismatch: usize, pub(super) bad_parent_offset: usize, + pub(super) fec_set_index_bad_deserialize: usize, + pub(super) misaligned_fec_set: usize, + pub(super) erasure_config_bad_deserialize: usize, + pub(super) misaligned_erasure_config: usize, + pub(super) shred_flags_bad_deserialize: usize, + pub(super) misaligned_last_data_index: usize, since: Option, pub overflow_shreds: usize, } @@ -182,6 +188,24 @@ impl ShredFetchStats { ("bad_shred_type", self.bad_shred_type, i64), ("shred_version_mismatch", self.shred_version_mismatch, i64), ("bad_parent_offset", self.bad_parent_offset, i64), + ( + "fec_set_index_bad_deserialize", + self.fec_set_index_bad_deserialize, + i64 + ), + ("misaligned_fec_set_size", self.misaligned_fec_set, i64), + ( + "erasure_config_bad_deserialize", + 
self.erasure_config_bad_deserialize, + i64 + ), + ("misaligned_erasure_config", self.misaligned_erasure_config, i64), + ( + "shred_flags_bad_deserialize", + self.shred_flags_bad_deserialize, + i64 + ), + ("misaligned_last_data_index", self.misaligned_last_data_index, i64), ("overflow_shreds", self.overflow_shreds, i64), ); *self = Self { diff --git a/ledger/src/shred/wire.rs b/ledger/src/shred/wire.rs index 1ebbe4ac82556b..0e74997f36a0e6 100644 --- a/ledger/src/shred/wire.rs +++ b/ledger/src/shred/wire.rs @@ -2,9 +2,12 @@ // deserializing the entire payload. #![deny(clippy::indexing_slicing)] use { - crate::shred::{ - self, merkle_tree::SIZE_OF_MERKLE_ROOT, traits::Shred, Error, Nonce, ShredFlags, ShredId, - ShredType, ShredVariant, SignedData, SIZE_OF_COMMON_SHRED_HEADER, + crate::{ + blockstore_meta::ErasureConfig, + shred::{ + self, merkle_tree::SIZE_OF_MERKLE_ROOT, traits::Shred, Error, Nonce, ShredFlags, + ShredId, ShredType, ShredVariant, SIZE_OF_COMMON_SHRED_HEADER, + }, }, solana_clock::Slot, solana_hash::Hash, @@ -23,14 +26,10 @@ use { #[inline] fn get_shred_size(shred: &[u8]) -> Option { - // Legacy data shreds have zero padding at the end which might have been - // trimmed. Other variants do not have any trailing zeros. - Some(match get_shred_variant(shred).ok()? { - ShredVariant::LegacyCode => shred::legacy::ShredCode::SIZE_OF_PAYLOAD, - ShredVariant::LegacyData => shred::legacy::ShredData::SIZE_OF_PAYLOAD.min(shred.len()), - ShredVariant::MerkleCode { .. } => shred::merkle::ShredCode::SIZE_OF_PAYLOAD, - ShredVariant::MerkleData { .. } => shred::merkle::ShredData::SIZE_OF_PAYLOAD, - }) + match get_shred_variant(shred).ok()? { + ShredVariant::MerkleCode { .. } => Some(shred::merkle::ShredCode::SIZE_OF_PAYLOAD), + ShredVariant::MerkleData { .. 
} => Some(shred::merkle::ShredData::SIZE_OF_PAYLOAD), + } } #[inline] @@ -106,6 +105,12 @@ pub(super) fn get_version(shred: &[u8]) -> Option { Some(u16::from_le_bytes(bytes)) } +#[inline] +pub fn get_fec_set_index(shred: &[u8]) -> Option { + let bytes = <[u8; 4]>::try_from(shred.get(79..79 + 4)?).unwrap(); + Some(u32::from_le_bytes(bytes)) +} + // The caller should verify first that the shred is data and not code! #[inline] pub(super) fn get_parent_offset(shred: &[u8]) -> Option { @@ -114,6 +119,14 @@ pub(super) fn get_parent_offset(shred: &[u8]) -> Option { Some(u16::from_le_bytes(bytes)) } +#[cfg(test)] +/// this will corrupt the shred by setting parent offset bytes +pub(crate) fn corrupt_and_set_parent_offset(shred: &mut [u8], parent_offset: u16) { + let bytes = parent_offset.to_le_bytes(); + assert_eq!(get_shred_type(shred).unwrap(), ShredType::Data); + shred.get_mut(83..83 + 2).unwrap().copy_from_slice(&bytes); +} + // Returns DataShredHeader.flags if the shred is data. // Returns Error::InvalidShredType for coding shreds. #[inline] @@ -144,11 +157,7 @@ fn get_data_size(shred: &[u8]) -> Result { #[inline] pub(crate) fn get_data(shred: &[u8]) -> Result<&[u8], Error> { match get_shred_variant(shred)? { - ShredVariant::LegacyCode => Err(Error::InvalidShredType), ShredVariant::MerkleCode { .. } => Err(Error::InvalidShredType), - ShredVariant::LegacyData => { - shred::legacy::ShredData::get_data(shred, get_data_size(shred)?) 
- } ShredVariant::MerkleData { proof_size, chained, @@ -163,6 +172,34 @@ pub(crate) fn get_data(shred: &[u8]) -> Result<&[u8], Error> { } } +/// Returns the ErasureConfig specified by the coding shred, or an Error if +/// the shred is a data shred +#[inline] +pub(crate) fn get_erasure_config(shred: &[u8]) -> Result { + if !matches!(get_shred_type(shred).unwrap(), ShredType::Code) { + return Err(Error::InvalidShredType); + } + let Some(num_data_bytes) = shred.get(83..83 + 2) else { + return Err(Error::InvalidPayloadSize(shred.len())); + }; + let Some(num_coding_bytes) = shred.get(85..85 + 2) else { + return Err(Error::InvalidPayloadSize(shred.len())); + }; + let num_data = <[u8; 2]>::try_from(num_data_bytes) + .map(u16::from_le_bytes) + .map(usize::from) + .map_err(|_| Error::InvalidErasureConfig)?; + let num_coding = <[u8; 2]>::try_from(num_coding_bytes) + .map(u16::from_le_bytes) + .map(usize::from) + .map_err(|_| Error::InvalidErasureConfig)?; + + Ok(ErasureConfig { + num_data, + num_coding, + }) +} + #[inline] pub fn get_shred_id(shred: &[u8]) -> Option { Some(ShredId( @@ -172,49 +209,22 @@ pub fn get_shred_id(shred: &[u8]) -> Option { )) } -pub(crate) fn get_signed_data(shred: &[u8]) -> Option { +pub(crate) fn get_signed_data(shred: &[u8]) -> Option { let data = match get_shred_variant(shred).ok()? 
{ - ShredVariant::LegacyCode | ShredVariant::LegacyData => { - let chunk = shred.get(shred::legacy::SIGNED_MESSAGE_OFFSETS)?; - SignedData::Chunk(chunk) - } ShredVariant::MerkleCode { proof_size, chained, resigned, - } => { - let merkle_root = - shred::merkle::ShredCode::get_merkle_root(shred, proof_size, chained, resigned)?; - SignedData::MerkleRoot(merkle_root) - } + } => shred::merkle::ShredCode::get_merkle_root(shred, proof_size, chained, resigned)?, ShredVariant::MerkleData { proof_size, chained, resigned, - } => { - let merkle_root = - shred::merkle::ShredData::get_merkle_root(shred, proof_size, chained, resigned)?; - SignedData::MerkleRoot(merkle_root) - } + } => shred::merkle::ShredData::get_merkle_root(shred, proof_size, chained, resigned)?, }; Some(data) } -// Returns offsets within the shred payload which is signed. -pub(crate) fn get_signed_data_offsets(shred: &[u8]) -> Option> { - match get_shred_variant(shred).ok()? { - ShredVariant::LegacyCode | ShredVariant::LegacyData => { - let offsets = shred::legacy::SIGNED_MESSAGE_OFFSETS; - (offsets.end <= shred.len()).then_some(offsets) - } - // Merkle shreds sign merkle tree root which can be recovered from - // the merkle proof embedded in the payload but itself is not - // stored the payload. - ShredVariant::MerkleCode { .. } => None, - ShredVariant::MerkleData { .. } => None, - } -} - pub fn get_reference_tick(shred: &[u8]) -> Result { if get_shred_type(shred)? != ShredType::Data { return Err(Error::InvalidShredType); @@ -227,7 +237,6 @@ pub fn get_reference_tick(shred: &[u8]) -> Result { pub fn get_merkle_root(shred: &[u8]) -> Option { match get_shred_variant(shred).ok()? { - ShredVariant::LegacyCode | ShredVariant::LegacyData => None, ShredVariant::MerkleCode { proof_size, chained, @@ -243,7 +252,6 @@ pub fn get_merkle_root(shred: &[u8]) -> Option { pub(crate) fn get_chained_merkle_root(shred: &[u8]) -> Option { let offset = match get_shred_variant(shred).ok()? 
{ - ShredVariant::LegacyCode | ShredVariant::LegacyData => return None, ShredVariant::MerkleCode { proof_size, chained, @@ -268,7 +276,6 @@ pub(crate) fn get_chained_merkle_root(shred: &[u8]) -> Option { fn get_retransmitter_signature_offset(shred: &[u8]) -> Result { match get_shred_variant(shred)? { - ShredVariant::LegacyCode | ShredVariant::LegacyData => Err(Error::InvalidShredVariant), ShredVariant::MerkleCode { proof_size, chained, @@ -296,7 +303,6 @@ pub fn get_retransmitter_signature(shred: &[u8]) -> Result { pub fn is_retransmitter_signed_variant(shred: &[u8]) -> Result { match get_shred_variant(shred)? { - ShredVariant::LegacyCode | ShredVariant::LegacyData => Ok(false), ShredVariant::MerkleCode { proof_size: _, chained: _, @@ -352,9 +358,6 @@ pub fn resign_packet(packet: &mut PacketRefMut, keypair: &Keypair) -> Result<(), /// signature which is left intact. pub fn resign_shred(shred: &mut [u8], keypair: &Keypair) -> Result<(), Error> { let (offset, merkle_root) = match get_shred_variant(shred)? { - ShredVariant::LegacyCode | ShredVariant::LegacyData => { - return Err(Error::InvalidShredVariant) - } ShredVariant::MerkleCode { proof_size, chained, @@ -403,7 +406,6 @@ pub(crate) fn corrupt_packet( // as moved. let shred = get_shred(&*packet).unwrap(); let merkle_variant = match get_shred_variant(shred).unwrap() { - ShredVariant::LegacyCode | ShredVariant::LegacyData => None, ShredVariant::MerkleCode { proof_size, resigned, @@ -429,11 +431,8 @@ pub(crate) fn corrupt_packet( let size = shred.len() - if resigned { SIGNATURE_BYTES } else { 0 }; size - offset..size }) - .or_else(|| { - let Range { start, end } = get_signed_data_offsets(shred)?; - Some(start + 1..end) // +1 to exclude ShredVariant. - }); - modify_packet(rng, packet, offsets.unwrap()); + .expect("Only merkle shreds are possible"); + modify_packet(rng, packet, offsets); } // Assert that the signature no longer verifies. 
let shred = get_shred(packet).unwrap(); @@ -443,17 +442,12 @@ pub(crate) fn corrupt_packet( let pubkey = keypairs[&slot].pubkey(); let data = get_signed_data(shred).unwrap(); assert!(!signature.verify(pubkey.as_ref(), data.as_ref())); - if let Some(offsets) = get_signed_data_offsets(shred) { - assert!(!signature.verify(pubkey.as_ref(), &shred[offsets])); - } } else { // Slot may have been corrupted and no longer mapping to a keypair. let pubkey = keypairs.get(&slot).map(Keypair::pubkey).unwrap_or_default(); if let Some(data) = get_signed_data(shred) { assert!(!signature.verify(pubkey.as_ref(), data.as_ref())); } - let offsets = get_signed_data_offsets(shred).unwrap_or_default(); - assert!(!signature.verify(pubkey.as_ref(), &shred[offsets])); } } @@ -461,7 +455,9 @@ pub(crate) fn corrupt_packet( mod tests { use { super::*, - crate::shred::{tests::make_merkle_shreds_for_tests, traits::ShredData}, + crate::shred::{ + tests::make_merkle_shreds_for_tests, traits::ShredData, SHREDS_PER_FEC_BLOCK, + }, assert_matches::assert_matches, rand::Rng, solana_perf::packet::PacketFlags, @@ -475,22 +471,23 @@ mod tests { } #[test_matrix( - [true, false], [true, false], [true, false] )] - fn test_resign_packet(repaired: bool, chained: bool, is_last_in_slot: bool) { + fn test_resign_packet(repaired: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let slot = 318_230_963 + rng.gen_range(0..318_230_963); let data_size = 1200 * rng.gen_range(32..64); let mut shreds = - make_merkle_shreds_for_tests(&mut rng, slot, data_size, chained, is_last_in_slot) - .unwrap(); - for shred in shreds.iter_mut() { + make_merkle_shreds_for_tests(&mut rng, slot, data_size, is_last_in_slot).unwrap(); + // enumerate the shreds so that I have index of each shred + let shreds_len = shreds.len(); + for (index, shred) in shreds.iter_mut().enumerate() { let keypair = Keypair::new(); let signature = make_dummy_signature(&mut rng); let nonce = repaired.then(|| rng.gen::()); - if chained && 
is_last_in_slot { + let is_last_batch = index >= shreds_len - SHREDS_PER_FEC_BLOCK; + if is_last_in_slot && is_last_batch { shred.set_retransmitter_signature(&signature).unwrap(); let packet = &mut shred.payload().to_packet(nonce); @@ -532,20 +529,20 @@ mod tests { } #[test_matrix( - [true, false], [true, false], [true, false] )] - fn test_merkle_shred_wire_layout(repaired: bool, chained: bool, is_last_in_slot: bool) { + fn test_merkle_shred_wire_layout(repaired: bool, is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let slot = 318_230_963 + rng.gen_range(0..318_230_963); let data_size = 1200 * rng.gen_range(32..64); let mut shreds = - make_merkle_shreds_for_tests(&mut rng, slot, data_size, chained, is_last_in_slot) - .unwrap(); - for shred in &mut shreds { + make_merkle_shreds_for_tests(&mut rng, slot, data_size, is_last_in_slot).unwrap(); + let shreds_len = shreds.len(); + for (index, shred) in shreds.iter_mut().enumerate() { let signature = make_dummy_signature(&mut rng); - if chained && is_last_in_slot { + let is_last_batch = index >= shreds_len - SHREDS_PER_FEC_BLOCK; + if is_last_in_slot && is_last_batch { shred.set_retransmitter_signature(&signature).unwrap(); } else { assert_matches!( @@ -554,8 +551,10 @@ mod tests { ); } } - for shred in &shreds { + + for (index, shred) in shreds.iter().enumerate() { let nonce = repaired.then(|| rng.gen::()); + let is_last_batch = index >= shreds_len - SHREDS_PER_FEC_BLOCK; let mut packet = shred.payload().to_packet(nonce); if repaired { packet.meta_mut().flags |= PacketFlags::REPAIR; @@ -598,26 +597,21 @@ mod tests { }); assert_eq!( get_signed_data(bytes).unwrap(), - SignedData::MerkleRoot(shred.merkle_root().unwrap()) + shred.merkle_root().unwrap() ); - assert_matches!(get_signed_data_offsets(bytes), None); assert_eq!( get_merkle_root(bytes).unwrap(), shred.merkle_root().unwrap(), ); - if chained { - assert_eq!( - get_chained_merkle_root(bytes).unwrap(), - shred.chained_merkle_root().unwrap(), - ); - } else { - 
assert_matches!(get_chained_merkle_root(bytes), None); - } + assert_eq!( + get_chained_merkle_root(bytes).unwrap(), + shred.chained_merkle_root().unwrap(), + ); assert_eq!( is_retransmitter_signed_variant(bytes).unwrap(), - chained && is_last_in_slot + is_last_in_slot && is_last_batch, ); - if chained && is_last_in_slot { + if is_last_in_slot && is_last_batch { assert_eq!( get_retransmitter_signature_offset(bytes).unwrap(), shred.retransmitter_signature_offset().unwrap(), diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index 44a10a7771383b..9b3a3e55c355c6 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -69,7 +69,7 @@ impl Shredder { keypair: &Keypair, entries: &[Entry], is_last_in_slot: bool, - chained_merkle_root: Option, + chained_merkle_root: Hash, next_shred_index: u32, next_code_index: u32, reed_solomon_cache: &ReedSolomonCache, @@ -98,7 +98,7 @@ impl Shredder { keypair: &Keypair, data: &[u8], is_last_in_slot: bool, - chained_merkle_root: Option, + chained_merkle_root: Hash, next_shred_index: u32, next_code_index: u32, reed_solomon_cache: &ReedSolomonCache, @@ -108,7 +108,7 @@ impl Shredder { let shreds = shred::merkle::make_shreds_from_data( thread_pool, keypair, - chained_merkle_root, + Some(chained_merkle_root), data, self.slot, self.parent_slot, @@ -128,7 +128,7 @@ impl Shredder { keypair: &Keypair, entries: &[Entry], is_last_in_slot: bool, - chained_merkle_root: Option, + chained_merkle_root: Hash, next_shred_index: u32, next_code_index: u32, reed_solomon_cache: &ReedSolomonCache, @@ -204,7 +204,7 @@ impl Shredder { keypair, &[], true, - Some(Hash::default()), + Hash::default(), 0, 0, &reed_solomon_cache, @@ -276,7 +276,7 @@ mod tests { assert_eq!(verify, shred.verify(pk)); } - fn run_test_data_shredder(slot: Slot, chained: bool, is_last_in_slot: bool) { + fn run_test_data_shredder(slot: Slot, is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); // Test that parent cannot be > current slot @@ -308,10 +308,9 @@ 
mod tests { &keypair, &entries, is_last_in_slot, - // chained_merkle_root - chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), - start_index, // next_shred_index - start_index, // next_code_index + Hash::new_from_array(rand::thread_rng().gen()), // chained_merkle_root + start_index, // next_shred_index + start_index, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -366,19 +365,13 @@ mod tests { assert_eq!(entries, deshred_entries); } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_data_shredder(chained: bool, is_last_in_slot: bool) { - run_test_data_shredder(0x1234_5678_9abc_def0, chained, is_last_in_slot); + #[test_matrix([true, false])] + fn test_data_shredder(is_last_in_slot: bool) { + run_test_data_shredder(0x1234_5678_9abc_def0, is_last_in_slot); } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_deserialize_shred_payload(chained: bool, is_last_in_slot: bool) { + #[test_matrix([true, false])] + fn test_deserialize_shred_payload(is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let shredder = Shredder::new( 259_241_705, // slot @@ -401,10 +394,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - // chained_merkle_root - chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), - 369, // next_shred_index - 776, // next_code_index + Hash::new_from_array(rand::thread_rng().gen()), // chained_merkle_root + 369, // next_shred_index + 776, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -414,11 +406,8 @@ mod tests { } } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_shred_reference_tick(chained: bool, is_last_in_slot: bool) { + #[test_matrix([true, false])] + fn test_shred_reference_tick(is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let slot = 1; let parent_slot = 0; @@ -437,10 +426,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - // chained_merkle_root - 
chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), - 0, // next_shred_index - 0, // next_code_index + Hash::new_from_array(rand::thread_rng().gen()), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -455,11 +443,8 @@ mod tests { assert_eq!(deserialized_shred.reference_tick(), 5); } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_shred_reference_tick_overflow(chained: bool, is_last_in_slot: bool) { + #[test_matrix([true, false])] + fn test_shred_reference_tick_overflow(is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let slot = 1; let parent_slot = 0; @@ -478,10 +463,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - // chained_merkle_root - chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), - 0, // next_shred_index - 0, // next_code_index + Hash::new_from_array(rand::thread_rng().gen()), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -505,11 +489,12 @@ mod tests { ); } - fn run_test_data_and_code_shredder(slot: Slot, chained: bool, is_last_in_slot: bool) { + fn run_test_data_and_code_shredder(slot: Slot, is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let shredder = Shredder::new(slot, slot - 5, 0, 0).unwrap(); // Create enough entries to make > 1 shred - let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap(); + let data_buffer_size = + ShredData::capacity(/*merkle_proof_size:*/ Some((6, true, false))).unwrap(); let num_entries = max_ticks_per_n_shreds(1, Some(data_buffer_size)) + 1; let entries: Vec<_> = (0..num_entries) .map(|_| { @@ -525,10 +510,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - // chained_merkle_root - chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), - 0, // next_shred_index - 0, // next_code_index + 
Hash::new_from_array(rand::thread_rng().gen()), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -550,19 +534,13 @@ mod tests { } } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_data_and_code_shredder(chained: bool, is_last_in_slot: bool) { - run_test_data_and_code_shredder(0x1234_5678_9abc_def0, chained, is_last_in_slot); + #[test_matrix([true, false])] + fn test_data_and_code_shredder(is_last_in_slot: bool) { + run_test_data_and_code_shredder(0x1234_5678_9abc_def0, is_last_in_slot); } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_shred_version(chained: bool, is_last_in_slot: bool) { + #[test_matrix([true, false])] + fn test_shred_version(is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let hash = hash(Hash::default().as_ref()); let version = shred_version::version_from_hash(&hash); @@ -582,10 +560,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - // chained_merkle_root - chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), - 0, // next_shred_index - 0, // next_code_index + Hash::new_from_array(rand::thread_rng().gen()), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -595,11 +572,8 @@ mod tests { .any(|s| s.version() != version)); } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_shred_fec_set_index(chained: bool, is_last_in_slot: bool) { + #[test_matrix([true, false])] + fn test_shred_fec_set_index(is_last_in_slot: bool) { let keypair = Arc::new(Keypair::new()); let hash = hash(Hash::default().as_ref()); let version = shred_version::version_from_hash(&hash); @@ -620,10 +594,9 @@ mod tests { &keypair, &entries, is_last_in_slot, - // chained_merkle_root - chained.then(|| Hash::new_from_array(rand::thread_rng().gen())), - start_index, // next_shred_index - start_index, // next_code_index + 
Hash::new_from_array(rand::thread_rng().gen()), // chained_merkle_root + start_index, // next_shred_index + start_index, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index cfe11c8f7b4dbf..5184e10cb9200e 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -1,11 +1,12 @@ #![allow(clippy::implicit_hasher)] use { - crate::shred::{self, SignedData, SIZE_OF_MERKLE_ROOT}, + crate::shred::{self, SIZE_OF_MERKLE_ROOT}, itertools::{izip, Itertools}, rayon::{prelude::*, ThreadPool}, solana_clock::Slot, solana_hash::Hash, solana_metrics::inc_new_counter_debug, + solana_nohash_hasher::BuildNoHashHasher, solana_perf::{ cuda_runtime::PinnedVec, packet::{Packet, PacketBatch, PacketRef}, @@ -38,10 +39,12 @@ const SIGN_SHRED_GPU_MIN: usize = 256; pub type LruCache = lazy_lru::LruCache<(Signature, Pubkey, /*merkle root:*/ Hash), ()>; +pub type SlotPubkeys = HashMap>; + #[must_use] pub fn verify_shred_cpu( packet: PacketRef, - slot_leaders: &HashMap, + slot_leaders: &SlotPubkeys, cache: &RwLock, ) -> bool { if packet.meta().discard() { @@ -64,26 +67,22 @@ pub fn verify_shred_cpu( let Some(data) = shred::layout::get_signed_data(shred) else { return false; }; - match data { - SignedData::Chunk(chunk) => signature.verify(pubkey.as_ref(), chunk), - SignedData::MerkleRoot(root) => { - let key = (signature, *pubkey, root); - if cache.read().unwrap().get(&key).is_some() { - true - } else if key.0.verify(key.1.as_ref(), key.2.as_ref()) { - cache.write().unwrap().put(key, ()); - true - } else { - false - } - } + + let key = (signature, *pubkey, data); + if cache.read().unwrap().get(&key).is_some() { + true + } else if key.0.verify(key.1.as_ref(), key.2.as_ref()) { + cache.write().unwrap().put(key, ()); + true + } else { + false } } fn verify_shreds_cpu( thread_pool: &ThreadPool, batches: &[PacketBatch], - slot_leaders: &HashMap, + slot_leaders: 
&SlotPubkeys, cache: &RwLock, ) -> Vec> { let packet_count = count_packets_in_batches(batches); @@ -106,7 +105,7 @@ fn verify_shreds_cpu( fn slot_key_data_for_gpu( thread_pool: &ThreadPool, batches: &[PacketBatch], - slot_keys: &HashMap, + slot_keys: &SlotPubkeys, recycler_cache: &RecyclerCache, ) -> (/*pubkeys:*/ PinnedVec, TxOffset) { //TODO: mark Pubkey::default shreds as failed after the GPU returns @@ -225,6 +224,7 @@ fn elems_from_buffer(buffer: &PinnedVec) -> perf_libs::Elems { } } +// TODO: clean up legacy shred artifacts fn shred_gpu_offsets( offset: usize, batches: &[PacketBatch], @@ -244,7 +244,7 @@ fn shred_gpu_offsets( offset.checked_add(std::mem::size_of::()) }); let packets = batches.iter().flatten(); - for (offset, packet, merkle_root_offset) in izip!(offsets, packets, merkle_roots_offsets) { + for (offset, _packet, merkle_root_offset) in izip!(offsets, packets, merkle_roots_offsets) { let sig = shred::layout::get_signature_range(); let sig = add_offset(sig, offset); debug_assert_eq!(sig.end - sig.start, std::mem::size_of::()); @@ -252,9 +252,7 @@ fn shred_gpu_offsets( // discarded during deserialization. 
let msg: Range = match merkle_root_offset { None => { - let shred = shred::layout::get_shred(packet); - let msg = shred.and_then(shred::layout::get_signed_data_offsets); - add_offset(msg.unwrap_or_default(), offset) + 0..SIZE_OF_MERKLE_ROOT // legacy shreds - remove valid but useless offset } Some(merkle_root_offset) => { merkle_root_offset..merkle_root_offset + SIZE_OF_MERKLE_ROOT @@ -271,7 +269,7 @@ fn shred_gpu_offsets( pub fn verify_shreds_gpu( thread_pool: &ThreadPool, batches: &[PacketBatch], - slot_leaders: &HashMap, + slot_leaders: &SlotPubkeys, recycler_cache: &RecyclerCache, cache: &RwLock, ) -> Vec> { @@ -391,7 +389,7 @@ fn sign_shreds_cpu(thread_pool: &ThreadPool, keypair: &Keypair, batches: &mut [P fn sign_shreds_gpu_pinned_keypair(keypair: &Keypair, cache: &RecyclerCache) -> PinnedVec { let mut vec = cache.buffer().allocate("pinned_keypair"); let pubkey = keypair.pubkey().to_bytes(); - let secret = keypair.secret().to_bytes(); + let secret = keypair.secret_bytes(); let mut hasher = Sha512::default(); hasher.update(secret); let mut result = hasher.finalize(); @@ -544,7 +542,7 @@ mod tests { solana_system_transaction as system_transaction, solana_transaction::Transaction, std::iter::{once, repeat_with}, - test_case::test_matrix, + test_case::test_case, }; fn run_test_sigverify_shred_cpu(slot: Slot) { @@ -558,7 +556,7 @@ mod tests { &keypair, &[], true, - Some(Hash::default()), + Hash::default(), 0, 0, &reed_solomon_cache, @@ -570,14 +568,14 @@ mod tests { packet.buffer_mut()[..shred.payload().len()].copy_from_slice(shred.payload()); packet.meta_mut().size = shred.payload().len(); - let leader_slots = HashMap::from([(slot, keypair.pubkey())]); + let leader_slots: SlotPubkeys = [(slot, keypair.pubkey())].into_iter().collect(); assert!(verify_shred_cpu((&packet).into(), &leader_slots, &cache)); let wrong_keypair = Keypair::new(); - let leader_slots = HashMap::from([(slot, wrong_keypair.pubkey())]); + let leader_slots: SlotPubkeys = [(slot, 
wrong_keypair.pubkey())].into_iter().collect(); assert!(!verify_shred_cpu((&packet).into(), &leader_slots, &cache)); - let leader_slots = HashMap::new(); + let leader_slots: SlotPubkeys = HashMap::default(); assert!(!verify_shred_cpu((&packet).into(), &leader_slots, &cache)); } @@ -593,20 +591,20 @@ mod tests { let batch = make_packet_batch(&keypair, slot); let mut batches = [batch]; - let leader_slots = HashMap::from([(slot, keypair.pubkey())]); + let leader_slots: SlotPubkeys = [(slot, keypair.pubkey())].into_iter().collect(); let rv = verify_shreds_cpu(thread_pool, &batches, &leader_slots, &cache); assert_eq!(rv.into_iter().flatten().all_equal_value().unwrap(), 1); let wrong_keypair = Keypair::new(); - let leader_slots = HashMap::from([(slot, wrong_keypair.pubkey())]); + let leader_slots: SlotPubkeys = [(slot, wrong_keypair.pubkey())].into_iter().collect(); let rv = verify_shreds_cpu(thread_pool, &batches, &leader_slots, &cache); assert_eq!(rv.into_iter().flatten().all_equal_value().unwrap(), 0); - let leader_slots = HashMap::new(); + let leader_slots: SlotPubkeys = HashMap::default(); let rv = verify_shreds_cpu(thread_pool, &batches, &leader_slots, &cache); assert_eq!(rv.into_iter().flatten().all_equal_value().unwrap(), 0); - let leader_slots = HashMap::from([(slot, keypair.pubkey())]); + let leader_slots: SlotPubkeys = [(slot, keypair.pubkey())].into_iter().collect(); batches[0] .iter_mut() .for_each(|mut packet_ref| packet_ref.meta_mut().size = 0); @@ -629,7 +627,9 @@ mod tests { let batch = make_packet_batch(&keypair, slot); let mut batches = [batch]; - let leader_slots = HashMap::from([(u64::MAX, Pubkey::default()), (slot, keypair.pubkey())]); + let leader_slots: SlotPubkeys = [(u64::MAX, Pubkey::default()), (slot, keypair.pubkey())] + .into_iter() + .collect(); let rv = verify_shreds_gpu( thread_pool, &batches, @@ -640,10 +640,12 @@ mod tests { assert_eq!(rv.into_iter().flatten().all_equal_value().unwrap(), 1); let wrong_keypair = Keypair::new(); - let 
leader_slots = HashMap::from([ + let leader_slots: SlotPubkeys = [ (u64::MAX, Pubkey::default()), (slot, wrong_keypair.pubkey()), - ]); + ] + .into_iter() + .collect(); let rv = verify_shreds_gpu( thread_pool, &batches, @@ -653,7 +655,7 @@ mod tests { ); assert_eq!(rv.into_iter().flatten().all_equal_value().unwrap(), 0); - let leader_slots = HashMap::from([(u64::MAX, Pubkey::default())]); + let leader_slots: SlotPubkeys = [(u64::MAX, Pubkey::default())].into_iter().collect(); let rv = verify_shreds_gpu( thread_pool, &batches, @@ -666,7 +668,9 @@ mod tests { batches[0] .iter_mut() .for_each(|mut pr| pr.meta_mut().size = 0); - let leader_slots = HashMap::from([(u64::MAX, Pubkey::default()), (slot, keypair.pubkey())]); + let leader_slots: SlotPubkeys = [(u64::MAX, Pubkey::default()), (slot, keypair.pubkey())] + .into_iter() + .collect(); let rv = verify_shreds_gpu( thread_pool, &batches, @@ -685,7 +689,7 @@ mod tests { keypair, &[], true, - Some(Hash::default()), + Hash::default(), 0, 0, &reed_solomon_cache, @@ -736,7 +740,6 @@ mod tests { fn make_shreds( rng: &mut R, - chained: bool, is_last_in_slot: bool, keypairs: &HashMap, ) -> Vec { @@ -757,10 +760,9 @@ mod tests { keypair, &make_entries(rng, num_entries), is_last_in_slot, - // chained_merkle_root - chained.then(|| Hash::new_from_array(rng.gen())), - rng.gen_range(0..2671), // next_shred_index - rng.gen_range(0..2781), // next_code_index + Hash::new_from_array(rng.gen()), // chained_merkle_root + rng.gen_range(0..2671), // next_shred_index + rng.gen_range(0..2781), // next_code_index &reed_solomon_cache, &mut ProcessShredsStats::default(), ) @@ -779,9 +781,6 @@ mod tests { let slot = shred::layout::get_slot(shred).unwrap(); let signature = shred::layout::get_signature(shred).unwrap(); let pubkey = keypairs[&slot].pubkey(); - if let Some(offsets) = shred::layout::get_signed_data_offsets(shred) { - assert!(signature.verify(pubkey.as_ref(), &shred[offsets])); - } let data = 
shred::layout::get_signed_data(shred).unwrap(); assert!(signature.verify(pubkey.as_ref(), data.as_ref())); } @@ -810,11 +809,9 @@ mod tests { packets } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_verify_shreds_fuzz(chained: bool, is_last_in_slot: bool) { + #[test_case(true)] + #[test_case(false)] + fn test_verify_shreds_fuzz(is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let cache = RwLock::new(LruCache::new(/*capacity:*/ 128)); let thread_pool = ThreadPoolBuilder::new().num_threads(3).build().unwrap(); @@ -823,8 +820,8 @@ mod tests { .map(|slot| (slot, Keypair::new())) .take(3) .collect(); - let shreds = make_shreds(&mut rng, chained, is_last_in_slot, &keypairs); - let pubkeys: HashMap = keypairs + let shreds = make_shreds(&mut rng, is_last_in_slot, &keypairs); + let pubkeys: SlotPubkeys = keypairs .iter() .map(|(&slot, keypair)| (slot, keypair.pubkey())) .chain(once((Slot::MAX, Pubkey::default()))) @@ -862,11 +859,9 @@ mod tests { ); } - #[test_matrix( - [true, false], - [true, false] - )] - fn test_sign_shreds_gpu(chained: bool, is_last_in_slot: bool) { + #[test_case(true)] + #[test_case(false)] + fn test_sign_shreds_gpu(is_last_in_slot: bool) { let mut rng = rand::thread_rng(); let cache = RwLock::new(LruCache::new(/*capacity:*/ 128)); let thread_pool = ThreadPoolBuilder::new().num_threads(3).build().unwrap(); @@ -876,10 +871,10 @@ mod tests { .map(|slot| (slot, Keypair::new())) .take(3) .collect(); - make_shreds(&mut rng, chained, is_last_in_slot, &keypairs) + make_shreds(&mut rng, is_last_in_slot, &keypairs) }; let keypair = Keypair::new(); - let pubkeys: HashMap = { + let pubkeys: SlotPubkeys = { let pubkey = keypair.pubkey(); shreds .iter() diff --git a/ledger/src/slot_stats.rs b/ledger/src/slot_stats.rs index 7d4398341032fc..1c202e16e1512a 100644 --- a/ledger/src/slot_stats.rs +++ b/ledger/src/slot_stats.rs @@ -3,10 +3,7 @@ use { bitflags::bitflags, lru::LruCache, solana_clock::Slot, - std::{ - collections::HashMap, - 
sync::{Mutex, MutexGuard}, - }, + std::{collections::HashMap, sync::Mutex}, }; const SLOTS_STATS_CACHE_CAPACITY: usize = 300; @@ -74,19 +71,25 @@ impl Default for SlotsStats { } impl SlotsStats { - fn get_or_default_with_eviction_check<'a>( - stats: &'a mut MutexGuard>, + /// Returns a mutable reference to [`SlotStats`] associated with the slot in the stats LruCache + /// and a possibly evicted cache entry. + /// + /// A new SlotStats entry will be inserted if there is not one present for `slot`; insertion + /// may cause an existing entry to be evicted. + fn get_or_default_with_eviction_check( + stats: &mut LruCache, slot: Slot, - ) -> (&'a mut SlotStats, Option<(Slot, SlotStats)>) { - let evicted = if stats.len() == stats.cap() { - match stats.peek_lru() { - Some((s, _)) if *s == slot => None, - _ => stats.pop_lru(), - } - } else { + ) -> (&mut SlotStats, Option<(Slot, SlotStats)>) { + let evicted = if stats.contains(&slot) { None + } else { + // insert slot in cache which might potentially evict an entry + let evicted = stats.push(slot, SlotStats::default()); + if let Some((evicted_slot, _)) = evicted { + assert_ne!(evicted_slot, slot); + } + evicted }; - stats.get_or_insert(slot, SlotStats::default); (stats.get_mut(&slot).unwrap(), evicted) } @@ -97,30 +100,32 @@ impl SlotsStats { source: ShredSource, slot_meta: Option<&SlotMeta>, ) { - let mut slot_full_reporting_info = None; - let mut stats = self.stats.lock().unwrap(); - let (slot_stats, evicted) = Self::get_or_default_with_eviction_check(&mut stats, slot); - match source { - ShredSource::Recovered => slot_stats.num_recovered += 1, - ShredSource::Repaired => slot_stats.num_repaired += 1, - ShredSource::Turbine => { - *slot_stats - .turbine_fec_set_index_counts - .entry(fec_set_index) - .or_default() += 1 + let (slot_full_reporting_info, evicted) = { + let mut stats = self.stats.lock().unwrap(); + let (slot_stats, evicted) = Self::get_or_default_with_eviction_check(&mut stats, slot); + match source { + 
ShredSource::Recovered => slot_stats.num_recovered += 1, + ShredSource::Repaired => slot_stats.num_repaired += 1, + ShredSource::Turbine => { + *slot_stats + .turbine_fec_set_index_counts + .entry(fec_set_index) + .or_default() += 1 + } } - } - if let Some(meta) = slot_meta { - if meta.is_full() { - slot_stats.last_index = meta.last_index.unwrap_or_default(); - if !slot_stats.flags.contains(SlotFlags::FULL) { - slot_stats.flags |= SlotFlags::FULL; - slot_full_reporting_info = - Some((slot_stats.num_repaired, slot_stats.num_recovered)); + let mut slot_full_reporting_info = None; + if let Some(meta) = slot_meta { + if meta.is_full() { + slot_stats.last_index = meta.last_index.unwrap(); + if !slot_stats.flags.contains(SlotFlags::FULL) { + slot_stats.flags |= SlotFlags::FULL; + slot_full_reporting_info = + Some((slot_stats.num_repaired, slot_stats.num_recovered)); + } } } - } - drop(stats); + (slot_full_reporting_info, evicted) + }; if let Some((num_repaired, num_recovered)) = slot_full_reporting_info { let slot_meta = slot_meta.unwrap(); let total_time_ms = diff --git a/ledger/src/staking_utils.rs b/ledger/src/staking_utils.rs index 8e677b22067a12..a146de99c58402 100644 --- a/ledger/src/staking_utils.rs +++ b/ledger/src/staking_utils.rs @@ -2,7 +2,7 @@ pub(crate) mod tests { use { rand::Rng, - solana_account::AccountSharedData, + solana_account::{AccountSharedData, WritableAccount}, solana_clock::Clock, solana_instruction::Instruction, solana_keypair::Keypair, @@ -10,8 +10,9 @@ pub(crate) mod tests { solana_runtime::bank::Bank, solana_signer::{signers::Signers, Signer}, solana_stake_interface::{ - instruction as stake_instruction, - state::{Authorized, Lockup}, + program as stake_program, + stake_flags::StakeFlags, + state::{Authorized, Delegation, Meta, Stake, StakeStateV2}, }, solana_transaction::Transaction, solana_vote::vote_account::{VoteAccount, VoteAccounts}, @@ -53,7 +54,7 @@ pub(crate) mod tests { }, amount, vote_instruction::CreateVoteAccountConfig { - 
space: VoteStateVersions::vote_state_size_of(true) as u64, + space: VoteStateV3::size_of() as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, ), @@ -62,18 +63,31 @@ pub(crate) mod tests { let stake_account_keypair = Keypair::new(); let stake_account_pubkey = stake_account_keypair.pubkey(); - process_instructions( - bank, - &[from_account, &stake_account_keypair], - &stake_instruction::create_account_and_delegate_stake( - &from_account.pubkey(), - &stake_account_pubkey, - &vote_pubkey, - &Authorized::auto(&stake_account_pubkey), - &Lockup::default(), - amount, - ), + let stake_account = StakeStateV2::Stake( + Meta { + authorized: Authorized::auto(&stake_account_pubkey), + ..Meta::default() + }, + Stake { + delegation: Delegation { + voter_pubkey: vote_pubkey, + stake: amount, + ..Delegation::default() + }, + ..Stake::default() + }, + StakeFlags::default(), ); + + let account = AccountSharedData::create( + 1, + bincode::serialize(&stake_account).unwrap(), + stake_program::id(), + false, + u64::MAX, + ); + + bank.store_account(&stake_account_pubkey, &account); } #[test] @@ -112,7 +126,7 @@ pub(crate) mod tests { let vote_accounts = stakes.into_iter().map(|(stake, vote_state)| { let account = AccountSharedData::new_data( rng.gen(), // lamports - &VoteStateVersions::new_current(vote_state), + &VoteStateVersions::new_v3(vote_state), &solana_vote_program::id(), // owner ) .unwrap(); diff --git a/ledger/src/transaction_balances.rs b/ledger/src/transaction_balances.rs index 1c8c26436e19fc..7c0e14bc6e7916 100644 --- a/ledger/src/transaction_balances.rs +++ b/ledger/src/transaction_balances.rs @@ -38,7 +38,7 @@ fn collected_token_infos_to_token_balances( .collect() } -fn svm_token_info_to_token_balance(svm_info: SvmTokenInfo) -> TransactionTokenBalance { +pub fn svm_token_info_to_token_balance(svm_info: SvmTokenInfo) -> TransactionTokenBalance { let SvmTokenInfo { account_index, mint, diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs index 
ce7851adb7a5f0..a4c2b36cb0380e 100644 --- a/ledger/tests/shred.rs +++ b/ledger/tests/shred.rs @@ -5,8 +5,9 @@ use { solana_hash::Hash, solana_keypair::Keypair, solana_ledger::shred::{ - self, max_entries_per_n_shred, recover, verify_test_data_shred, ProcessShredsStats, - ReedSolomonCache, Shred, ShredData, Shredder, DATA_SHREDS_PER_FEC_BLOCK, + self, max_entries_per_n_shred, max_entries_per_n_shred_last_or_not, recover, + verify_test_data_shred, ProcessShredsStats, ReedSolomonCache, Shred, ShredData, Shredder, + DATA_SHREDS_PER_FEC_BLOCK, }, solana_signer::Signer, solana_system_transaction as system_transaction, @@ -32,10 +33,8 @@ fn test_multi_fec_block_coding(is_last_in_slot: bool) { let keypair1 = Keypair::new(); let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); let entry = Entry::new(&Hash::default(), 1, vec![tx0]); - let chained_merkle_root = Some(Hash::default()); - let merkle_capacity = ShredData::capacity(Some((6, true, is_last_in_slot))).unwrap(); let num_entries = - max_entries_per_n_shred(&entry, num_data_shreds as u64, Some(merkle_capacity)); + max_entries_per_n_shred_last_or_not(&entry, num_data_shreds as u64, is_last_in_slot); let entries: Vec<_> = (0..num_entries) .map(|_| { @@ -54,9 +53,9 @@ fn test_multi_fec_block_coding(is_last_in_slot: bool) { &keypair, &entries, is_last_in_slot, - chained_merkle_root, - 0, // next_shred_index - 0, // next_code_index + Hash::default(), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &reed_solomon_cache, &mut ProcessShredsStats::default(), ); @@ -145,7 +144,7 @@ fn test_multi_fec_block_different_size_coding() { // Necessary in order to ensure the last shred in the slot // is part of the recovered set, and that the below `index` // calculation in the loop is correct - assert!(fec_data_shreds.len() % 2 == 0); + assert_eq!(fec_data_shreds.len() % 2, 0); for (i, recovered_shred) in recovered_data.into_iter().enumerate() { let index = first_data_index + 
(i * 2) + 1; verify_test_data_shred( @@ -202,7 +201,7 @@ fn setup_different_sized_fec_blocks( let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); let entry = Entry::new(&Hash::default(), 1, vec![tx0]); let merkle_capacity = ShredData::capacity(Some((6, true, true))).unwrap(); - let chained_merkle_root = Some(Hash::default()); + let chained_merkle_root = Hash::default(); assert!(DATA_SHREDS_PER_FEC_BLOCK > 2); let num_shreds_per_iter = DATA_SHREDS_PER_FEC_BLOCK; diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 7aac89612af26e..ead0c45d67e3ac 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "solana-local-cluster" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-local-cluster" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -13,7 +13,7 @@ edition = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [features] -dev-context-only-utils = [] +dev-context-only-utils = ["solana-core/dev-context-only-utils"] [dependencies] crossbeam-channel = { workspace = true } @@ -26,6 +26,7 @@ solana-accounts-db = { workspace = true } solana-client = { workspace = true } solana-client-traits = { workspace = true } solana-clock = { workspace = true } +solana-cluster-type = { workspace = true } solana-commitment-config = { workspace = true } solana-core = { workspace = true } solana-entry = { workspace = true } @@ -41,9 +42,11 @@ solana-message = { workspace = true } solana-native-token = { workspace = true } solana-net-utils = { workspace = true } solana-poh-config = { workspace = true } +solana-program-binaries = { workspace = true } solana-pubkey = { workspace = true } solana-pubsub-client = { workspace = true } solana-quic-client = { workspace = true } +solana-rent = { workspace = 
true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 085efb35ed8812..646aca0e8e8896 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -11,13 +11,14 @@ use { solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs, solana_client::connection_cache::ConnectionCache, solana_clock::{Slot, DEFAULT_DEV_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT}, + solana_cluster_type::ClusterType, solana_commitment_config::CommitmentConfig, solana_core::{ consensus::tower_storage::FileTowerStorage, validator::{Validator, ValidatorConfig, ValidatorStartProgress, ValidatorTpuConfig}, }, solana_epoch_schedule::EpochSchedule, - solana_genesis_config::{ClusterType, GenesisConfig}, + solana_genesis_config::GenesisConfig, solana_gossip::{ contact_info::{ContactInfo, Protocol}, gossip_service::{discover, discover_validators}, @@ -29,7 +30,9 @@ use { solana_native_token::LAMPORTS_PER_SOL, solana_net_utils::sockets::bind_to_localhost_unique, solana_poh_config::PohConfig, + solana_program_binaries::core_bpf_programs, solana_pubkey::Pubkey, + solana_rent::Rent, solana_rpc_client::rpc_client::RpcClient, solana_runtime::{ genesis_utils::{ @@ -37,6 +40,7 @@ use { ValidatorVoteKeypairs, }, snapshot_config::SnapshotConfig, + snapshot_utils::BANK_SNAPSHOTS_DIR, }, solana_signer::{signers::Signers, Signer}, solana_stake_interface::{ @@ -91,7 +95,6 @@ pub struct ClusterConfig { pub slots_per_epoch: u64, pub stakers_slot_offset: u64, pub skip_warmup_slots: bool, - pub native_instruction_processors: Vec<(String, Pubkey)>, pub cluster_type: ClusterType, pub poh_config: PohConfig, pub additional_accounts: Vec<(Pubkey, AccountSharedData)>, @@ -130,7 +133,6 @@ impl Default for ClusterConfig { ticks_per_slot: DEFAULT_TICKS_PER_SLOT, slots_per_epoch: DEFAULT_DEV_SLOTS_PER_EPOCH, stakers_slot_offset: 
DEFAULT_DEV_SLOTS_PER_EPOCH, - native_instruction_processors: vec![], cluster_type: ClusterType::Development, poh_config: PohConfig::default(), skip_warmup_slots: false, @@ -190,7 +192,7 @@ impl LocalCluster { snapshot_config.full_snapshot_archives_dir = ledger_path.to_path_buf(); } if snapshot_config.bank_snapshots_dir == dummy { - snapshot_config.bank_snapshots_dir = ledger_path.join("snapshot"); + snapshot_config.bank_snapshots_dir = ledger_path.join(BANK_SNAPSHOTS_DIR); } } @@ -250,6 +252,12 @@ impl LocalCluster { } }; + for core_program_account in &core_bpf_programs(&Rent::default(), |_| true) { + config + .additional_accounts + .push(core_program_account.clone()); + } + // Mint used to fund validator identities for non-genesis accounts. // Verify we have enough lamports in the mint address to do those transfers. let mut required_mint_lamports = 0; @@ -318,9 +326,6 @@ impl LocalCluster { !config.skip_warmup_slots, ); genesis_config.poh_config = config.poh_config.clone(); - genesis_config - .native_instruction_processors - .extend_from_slice(&config.native_instruction_processors); let mut leader_config = safe_clone_config(&config.validator_configs[0]); let (leader_ledger_path, _blockhash) = create_new_tmp_ledger_with_size!( @@ -815,7 +820,7 @@ impl LocalCluster { }, amount, vote_instruction::CreateVoteAccountConfig { - space: vote_state::VoteStateVersions::vote_state_size_of(true) as u64, + space: vote_state::VoteStateV3::size_of() as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, ); @@ -973,7 +978,7 @@ fn create_connection_cache( Arc::new(ConnectionCache::new_with_client_options( "connection_cache_local_cluster_quic_staked", tpu_connection_pool_size, - None, + Some(solana_net_utils::sockets::bind_to_localhost_unique().unwrap()), Some(( &config.client_keypair, IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index e94361664dca9c..c57c2017792441 100644 --- 
a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -64,6 +64,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { banking_trace_dir_byte_limit: config.banking_trace_dir_byte_limit, block_verification_method: config.block_verification_method.clone(), block_production_method: config.block_production_method.clone(), + block_production_num_workers: config.block_production_num_workers, transaction_struct: config.transaction_struct.clone(), enable_block_production_forwarding: config.enable_block_production_forwarding, generator_config: config.generator_config.clone(), diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index ae8482d4274ff2..b94d1a1ebc1cd3 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -15,6 +15,7 @@ use { solana_clock::{ self as clock, Slot, DEFAULT_SLOTS_PER_EPOCH, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, }, + solana_cluster_type::ClusterType, solana_commitment_config::CommitmentConfig, solana_core::{ consensus::{ @@ -27,7 +28,6 @@ use { solana_download_utils::download_snapshot_archive, solana_entry::entry::create_ticks, solana_epoch_schedule::{MAX_LEADER_SCHEDULE_EPOCH_OFFSET, MINIMUM_SLOTS_PER_EPOCH}, - solana_genesis_config::ClusterType, solana_gossip::{crds_data::MAX_VOTES, gossip_service::discover_validators}, solana_hard_forks::HardForks, solana_hash::Hash, @@ -74,7 +74,7 @@ use { snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_package::SnapshotKind, - snapshot_utils::{self, SnapshotInterval}, + snapshot_utils::{self, SnapshotInterval, BANK_SNAPSHOTS_DIR}, }, solana_signer::Signer, solana_stake_interface::{self as stake, state::NEW_WARMUP_COOLDOWN_RATE}, @@ -295,7 +295,7 @@ fn test_two_unbalanced_stakes() { error!("test_two_unbalanced_stakes"); let validator_config = ValidatorConfig::default_for_test(); let num_ticks_per_second = 100; - let num_ticks_per_slot = 10; + let 
num_ticks_per_slot = 16; let num_slots_per_epoch = MINIMUM_SLOTS_PER_EPOCH; let mut cluster = LocalCluster::new( @@ -2321,7 +2321,7 @@ fn test_run_test_load_program_accounts_root() { fn create_simple_snapshot_config(ledger_path: &Path) -> SnapshotConfig { SnapshotConfig { full_snapshot_archives_dir: ledger_path.to_path_buf(), - bank_snapshots_dir: ledger_path.join("snapshot"), + bank_snapshots_dir: ledger_path.join(BANK_SNAPSHOTS_DIR), ..SnapshotConfig::default() } } @@ -5040,7 +5040,7 @@ fn test_boot_from_local_state() { let timer = Instant::now(); let bank_snapshot = loop { if let Some(bank_snapshot) = - snapshot_utils::get_highest_bank_snapshot_post(&validator2_config.bank_snapshots_dir) + snapshot_utils::get_highest_bank_snapshot(&validator2_config.bank_snapshots_dir) { if bank_snapshot.slot > incremental_snapshot_archive.slot() { break bank_snapshot; @@ -5890,10 +5890,10 @@ fn test_invalid_forks_persisted_on_restart() { .entries_to_merkle_shreds_for_tests( &majority_keypair, &entries, - true, // is_full_slot - None, // chained_merkle_root - 0, // next_shred_index, - 0, // next_code_index + true, // is_full_slot + Hash::default(), // chained_merkle_root + 0, // next_shred_index, + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ) diff --git a/log-analyzer/Cargo.toml b/log-analyzer/Cargo.toml deleted file mode 100644 index 27c95c9b7ed5ba..00000000000000 --- a/log-analyzer/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "solana-log-analyzer" -description = "The solana cluster network analysis tool" -publish = false -version = { workspace = true } -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -edition = { workspace = true } - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "solana-log-analyzer" -path = "src/main.rs" - -[dependencies] -byte-unit = { workspace = true } -clap = { version = 
"3.1.5", features = ["cargo"] } -serde = { workspace = true } -serde_derive = { workspace = true } -serde_json = { workspace = true } -solana-logger = "=2.3.1" -solana-version = { workspace = true } diff --git a/log-analyzer/src/main.rs b/log-analyzer/src/main.rs deleted file mode 100644 index 32ce3fbb5a27fc..00000000000000 --- a/log-analyzer/src/main.rs +++ /dev/null @@ -1,253 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] -extern crate byte_unit; - -use { - byte_unit::Byte, - clap::{crate_description, crate_name, Arg, ArgMatches, Command}, - serde_derive::{Deserialize, Serialize}, - std::{collections::HashMap, fs, ops::Sub, path::PathBuf}, -}; - -#[derive(Deserialize, Serialize, Debug)] -struct IpAddrMapping { - private: String, - public: String, -} - -#[derive(Deserialize, Serialize, Debug)] -struct LogLine { - a: String, - b: String, - a_to_b: String, - b_to_a: String, -} - -impl Default for LogLine { - fn default() -> Self { - Self { - a: String::default(), - b: String::default(), - a_to_b: "0B".to_string(), - b_to_a: "0B".to_string(), - } - } -} - -impl LogLine { - fn output(a: &str, b: &str, v1: u128, v2: u128) -> String { - format!( - "Lost {}%, {}, ({} - {}), sender {}, receiver {}", - ((v1 - v2) * 100 / v1), - Byte::from_bytes(v1 - v2).get_appropriate_unit(true), - Byte::from_bytes(v1).get_appropriate_unit(true), - Byte::from_bytes(v2).get_appropriate_unit(true), - a, - b - ) - } -} - -impl Sub for &LogLine { - type Output = String; - - #[allow(clippy::comparison_chain)] - fn sub(self, rhs: Self) -> Self::Output { - let a_to_b = Byte::from_str(&self.a_to_b) - .expect("Failed to read a_to_b bytes") - .get_bytes(); - let b_to_a = Byte::from_str(&self.b_to_a) - .expect("Failed to read b_to_a bytes") - .get_bytes(); - let rhs_a_to_b = Byte::from_str(&rhs.a_to_b) - .expect("Failed to read a_to_b bytes") - .get_bytes(); - let rhs_b_to_a = Byte::from_str(&rhs.b_to_a) - .expect("Failed to read b_to_a bytes") - .get_bytes(); - let mut out1 = if a_to_b > 
rhs_b_to_a { - LogLine::output(&self.a, &self.b, a_to_b, rhs_b_to_a) - } else if a_to_b < rhs_b_to_a { - LogLine::output(&self.b, &self.a, rhs_b_to_a, a_to_b) - } else { - String::default() - }; - let out2 = if rhs_a_to_b > b_to_a { - LogLine::output(&self.a, &self.b, rhs_a_to_b, b_to_a) - } else if rhs_a_to_b < b_to_a { - LogLine::output(&self.b, &self.a, b_to_a, rhs_a_to_b) - } else { - String::default() - }; - if !out1.is_empty() && !out2.is_empty() { - out1.push('\n'); - } - out1.push_str(&out2); - out1 - } -} - -fn map_ip_address(mappings: &[IpAddrMapping], target: String) -> String { - for mapping in mappings { - if target.contains(&mapping.private) { - return target.replace(&mapping.private, mapping.public.as_str()); - } - } - target -} - -fn process_iftop_logs(matches: &ArgMatches) { - let mut map_list: Vec = vec![]; - if let Some(("map-IP", args_matches)) = matches.subcommand() { - let mut list = args_matches - .value_of("list") - .expect("Missing list of IP address mappings") - .to_string(); - list.insert(0, '['); - let terminate_at = list - .rfind('}') - .expect("Didn't find a terminating '}' in IP list") - + 1; - let _ = list.split_off(terminate_at); - list.push(']'); - map_list = serde_json::from_str(&list).expect("Failed to parse IP address mapping list"); - }; - - let log_path = PathBuf::from(matches.value_of_t_or_exit::("file")); - let mut log = fs::read_to_string(log_path).expect("Unable to read log file"); - log.insert(0, '['); - let terminate_at = log.rfind('}').expect("Didn't find a terminating '}'") + 1; - let _ = log.split_off(terminate_at); - log.push(']'); - let json_log: Vec = serde_json::from_str(&log).expect("Failed to parse log as JSON"); - - let mut unique_latest_logs = HashMap::new(); - - json_log.into_iter().rev().for_each(|l| { - if !l.a.is_empty() && !l.b.is_empty() && !l.a_to_b.is_empty() && !l.b_to_a.is_empty() { - let key = (l.a.clone(), l.b.clone()); - unique_latest_logs.entry(key).or_insert(l); - } - }); - let output: Vec = 
unique_latest_logs - .into_values() - .map(|l| { - if map_list.is_empty() { - l - } else { - LogLine { - a: map_ip_address(&map_list, l.a), - b: map_ip_address(&map_list, l.b), - a_to_b: l.a_to_b, - b_to_a: l.b_to_a, - } - } - }) - .collect(); - - println!("{}", serde_json::to_string(&output).unwrap()); -} - -fn analyze_logs(matches: &ArgMatches) { - let dir_path = PathBuf::from(matches.value_of_t_or_exit::("folder")); - assert!( - dir_path.is_dir(), - "Need a folder that contains all log files" - ); - let list_all_diffs = matches.is_present("all"); - let files = fs::read_dir(dir_path).expect("Failed to read log folder"); - let logs: Vec<_> = files - .flat_map(|f| { - if let Ok(f) = f { - let log_str = fs::read_to_string(f.path()).expect("Unable to read log file"); - let log: Vec = - serde_json::from_str(log_str.as_str()).expect("Failed to deserialize log"); - log - } else { - vec![] - } - }) - .collect(); - let mut logs_hash = HashMap::new(); - logs.iter().for_each(|l| { - let key = (l.a.clone(), l.b.clone()); - logs_hash.entry(key).or_insert(l); - }); - - logs.iter().for_each(|l| { - let diff = logs_hash - .remove(&(l.a.clone(), l.b.clone())) - .map(|v1| { - logs_hash.remove(&(l.b.clone(), l.a.clone())).map_or( - if list_all_diffs { - v1 - &LogLine::default() - } else { - String::default() - }, - |v2| v1 - v2, - ) - }) - .unwrap_or_default(); - if !diff.is_empty() { - println!("{diff}"); - } - }); -} - -fn main() { - solana_logger::setup(); - - let matches = Command::new(crate_name!()) - .about(crate_description!()) - .version(solana_version::version!()) - .subcommand( - Command::new("iftop") - .about("Process iftop log file") - .arg( - Arg::new("file") - .short('f') - .long("file") - .value_name("iftop log file") - .takes_value(true) - .help("Location of the log file generated by iftop"), - ) - .subcommand( - Command::new("map-IP") - .about("Map private IP to public IP Address") - .arg( - Arg::new("list") - .short('l') - .long("list") - .value_name("JSON 
string") - .takes_value(true) - .required(true) - .help("JSON string with a list of mapping"), - ), - ), - ) - .subcommand( - Command::new("analyze") - .about("Compare processed network log files") - .arg( - Arg::new("folder") - .short('f') - .long("folder") - .value_name("DIR") - .takes_value(true) - .help("Location of processed log files"), - ) - .arg( - Arg::new("all") - .short('a') - .long("all") - .takes_value(false) - .help("List all differences"), - ), - ) - .get_matches(); - - match matches.subcommand() { - Some(("iftop", args_matches)) => process_iftop_logs(args_matches), - Some(("analyze", args_matches)) => analyze_logs(args_matches), - _ => {} - }; -} diff --git a/low-pass-filter/Cargo.toml b/low-pass-filter/Cargo.toml new file mode 100644 index 00000000000000..a7f6cf79a76120 --- /dev/null +++ b/low-pass-filter/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "agave-low-pass-filter" +description = "Low Pass Filter" +version = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[lib] +path = "src/lib.rs" + +[features] +agave-unstable-api = [] + +[dependencies] diff --git a/low-pass-filter/src/lib.rs b/low-pass-filter/src/lib.rs new file mode 100644 index 00000000000000..db9a6089dd548c --- /dev/null +++ b/low-pass-filter/src/lib.rs @@ -0,0 +1,217 @@ +#![cfg(feature = "agave-unstable-api")] +//! Fixed-point IIR filter for smoothing `alpha` updates. +//! +//! This is equivalent to a discrete-time Butterworth filter of order 1 +//! Implements: +//! alpha_new = K * target + (1 - K) * previous +//! +//! All math is unsigned integer fixed-point with `SCALE = 1,000,000` +//! +//! The filter constant K is derived from: +//! K = W_C / (1 + W_C), where Wc = 2π * Fs / Tc +//! Fc = 1 / TC (cutoff frequency) +//! 
Fs = 1 / refresh interval +pub mod api { + use std::num::NonZeroU64; + + // Fixed point scale for K and `alpha` calculation + pub const SCALE: NonZeroU64 = NonZeroU64::new(1_000_000).unwrap(); + // 2 * pi * SCALE + const TWO_PI_SCALED: u64 = (2.0 * std::f64::consts::PI * SCALE.get() as f64) as u64; + + #[derive(Clone)] + pub struct FilterConfig { + pub output_range: std::ops::Range, + pub k: u64, + } + + /// Computes the filter constant `K` for a given sample period and + /// time‑constant, both in **milliseconds**. + /// + /// Returns `K` scaled by `SCALE` (0–1,000,000). + #[allow(clippy::arithmetic_side_effects)] + pub fn compute_k(fs_ms: u64, tc_ms: u64) -> u64 { + if tc_ms == 0 { + return 0; + } + let scale = SCALE.get(); + let wc_scaled = (TWO_PI_SCALED.saturating_mul(fs_ms)).saturating_div(tc_ms); + // ((wc_scaled * scale + scale / 2) / (scale + wc_scaled)).min(scale) rounded to nearest integer + ((wc_scaled + .saturating_mul(scale) + .saturating_add(scale.saturating_div(2))) + .saturating_div(scale.saturating_add(wc_scaled))) + .min(scale) + } + + /// Updates alpha with a first-order low-pass filter. + /// ### Convergence Characteristics (w/ K = 0.611): + /// + /// - From a step change in target, `alpha` reaches: + /// - ~61% of the way to target after 1 update + /// - ~85% after 2 + /// - ~94% after 3 + /// - ~98% after 4 + /// - ~99% after 5 + /// + /// Note: Each update is `fs_ms` apart. `fs_ms` is 7500ms for push_active_set. + /// + /// If future code changes make `alpha_target` jump larger, we must retune + /// `TC`/`K` or use a higher‑order filter to avoid lag/overshoot. + /// Returns `alpha_new = K * target + (1 - K) * prev`, rounded and clamped. 
+ #[allow(clippy::arithmetic_side_effects)] + pub fn filter_alpha(prev: u64, target: u64, filter_config: FilterConfig) -> u64 { + let scale = SCALE.get(); + // (k * target + (scale - k) * prev) / scale + let next = (filter_config.k.saturating_mul(target)) + .saturating_add((scale.saturating_sub(filter_config.k)).saturating_mul(prev)) + .saturating_div(scale); + next.clamp( + filter_config.output_range.start, + filter_config.output_range.end, + ) + } +} + +#[cfg(test)] +mod tests { + use super::api::*; + + #[test] + fn test_compute_k_zero_tc() { + // When time constant is 0, K should be 0 + assert_eq!(compute_k(100, 0), 0); + assert_eq!(compute_k(1000, 0), 0); + assert_eq!(compute_k(u64::MAX, 0), 0); + } + + #[test] + fn test_compute_k_zero_fs() { + // When sample frequency is 0, K should be 0 + assert_eq!(compute_k(0, 100), 0); + assert_eq!(compute_k(0, 1000), 0); + assert_eq!(compute_k(0, u64::MAX), 0); + } + + #[test] + fn test_compute_k_large_values() { + // K should never exceed SCALE + let k = compute_k(u64::MAX, 1); + assert!(k <= SCALE.get()); + + let k = compute_k(1000000, 1); + assert!(k <= SCALE.get()); + + let k = compute_k(u64::MAX / 2, u64::MAX / 4); + assert!(k <= SCALE.get()); + + let k = compute_k(500000000, 1000000000); + assert!(k <= SCALE.get()); + } + + #[test] + fn test_compute_k_normal_cases() { + // Test some normal cases + let k1 = compute_k(100, 1000); + assert_eq!(k1, 385869); + + let k2 = compute_k(1000, 100); + assert_eq!(k2, 984333); + assert!(k2 > k1); + + let k3 = compute_k(1000, 1000); + assert_eq!(k3, 862697); + } + + #[test] + fn test_filter_alpha_k_zero() { + // When K=0, alpha should not change + let config = FilterConfig { + output_range: 0..1000000, + k: 0, + }; + + assert_eq!(filter_alpha(100, 500, config.clone()), 100); + assert_eq!(filter_alpha(0, 1000000, config.clone()), 0); + assert_eq!(filter_alpha(999999, 0, config), 999999); + } + + #[test] + fn test_filter_alpha_k_max() { + // When K=SCALE, alpha should equal target 
value (clamped to range) + let config = FilterConfig { + output_range: 0..1000000, + k: SCALE.get(), + }; + + assert_eq!(filter_alpha(100, 500, config.clone()), 500); + assert_eq!(filter_alpha(0, 1000000, config), 1000000); + + // Test clamping - target outside range + let config = FilterConfig { + output_range: 100..900, + k: SCALE.get(), + }; + assert_eq!(filter_alpha(200, 50, config.clone()), 100); + assert_eq!(filter_alpha(200, 1000, config), 900); + } + + #[test] + fn test_filter_alpha_clamping() { + // Test output range clamping + let config = FilterConfig { + output_range: 100..900, + k: SCALE.get() / 2, + }; + + // This should be within range + let result = filter_alpha(950, 50, config); + assert_eq!(result, 500); + + // Test extreme clamping + let config_narrow = FilterConfig { + output_range: 500..501, + k: SCALE.get() / 4, + }; + let result = filter_alpha(0, 1000000, config_narrow); + assert_eq!(result, 501); + } + + #[test] + fn test_filter_alpha_overflow_protection() { + // Test with large values that might cause overflow + let config = FilterConfig { + output_range: 0..u64::MAX, + k: SCALE.get() / 2, + }; + + let result = filter_alpha(u64::MAX / 2, u64::MAX / 2, config.clone()); + assert_eq!(result, 18446744073709); + + let result2 = filter_alpha(u64::MAX - 1000, u64::MAX - 2000, config); + assert_eq!(result2, 18446744073709); + } + + #[test] + fn test_filter_alpha_mathematical_correctness() { + let config = FilterConfig { + output_range: 0..u64::MAX, + k: SCALE.get() / 4, // 25% + }; + + let prev = 800; + let target = 400; + let result = filter_alpha(prev, target, config); + assert_eq!(result, 700); + + let config = FilterConfig { + output_range: 0..u64::MAX, + k: SCALE.get() * 60 / 100, // 60% + }; + + let prev = 111111; + let target = 222222; + let result = filter_alpha(prev, target, config); + assert_eq!(result, 177777); + } +} diff --git a/measure/Cargo.toml b/measure/Cargo.toml index 2cad7a2270278e..019b48d6a2be70 100644 --- a/measure/Cargo.toml 
+++ b/measure/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "solana-measure" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-measure" readme = "../README.md" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } diff --git a/metrics/src/counter.rs b/metrics/src/counter.rs index 21ba51ee46b0a7..dc3584ccc18aa1 100644 --- a/metrics/src/counter.rs +++ b/metrics/src/counter.rs @@ -174,13 +174,12 @@ impl Counter { let metricsrate = self.metricsrate.load(Ordering::Relaxed); if times % lograte == 0 && times > 0 && log_enabled!(level) { - log!(level, - "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {}, \"now\": {}, \"events\": {}}}", + log!( + level, + "COUNTER:{{\"name\": \"{}\", \"counts\": {}, \"samples\": {times}, \"now\": \ + {now}, \"events\": {events}}}", self.name, counts + events, - times, - now, - events, ); } @@ -298,7 +297,8 @@ mod tests { assert_eq!( Counter::default_log_rate(), DEFAULT_LOG_RATE, - "default_log_rate() is {}, expected {}, SOLANA_DEFAULT_LOG_RATE environment variable set?", + "default_log_rate() is {}, expected {}, SOLANA_DEFAULT_LOG_RATE environment variable \ + set?", Counter::default_log_rate(), DEFAULT_LOG_RATE, ); diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs index 313fb053a064bd..59f9f11fa8356c 100644 --- a/metrics/src/metrics.rs +++ b/metrics/src/metrics.rs @@ -81,7 +81,7 @@ impl InfluxDbMetricsWriter { fn build_write_url() -> Result { let config = get_metrics_config().map_err(|err| { - info!("metrics disabled: {}", err); + info!("metrics disabled: {err}"); err })?; @@ -149,7 +149,7 @@ impl MetricsWriter for InfluxDbMetricsWriter { let client = match client { Ok(client) => client, Err(err) => { - warn!("client instantiation failed: {}", err); + warn!("client instantiation failed: {err}"); return; } }; @@ -161,7 +161,7 @@ impl 
MetricsWriter for InfluxDbMetricsWriter { let text = resp .text() .unwrap_or_else(|_| "[text body empty]".to_string()); - warn!("submit response unsuccessful: {} {}", status, text,); + warn!("submit response unsuccessful: {status} {text}",); } } else { warn!("submit error: {}", response.unwrap_err()); @@ -226,13 +226,12 @@ impl MetricsAgent { let fit_counters = max_points.saturating_sub(points.len()); let points_written = cmp::min(num_points, max_points); - debug!("run: attempting to write {} points", num_points); + debug!("run: attempting to write {num_points} points"); if num_points > max_points { warn!( - "Max submission rate of {} datapoints per second exceeded. Only the \ - first {} of {} points will be submitted.", - max_points_per_sec, max_points, num_points + "Max submission rate of {max_points_per_sec} datapoints per second exceeded. Only \ + the first {max_points} of {num_points} points will be submitted." ); } @@ -321,11 +320,11 @@ impl MetricsAgent { barrier.wait(); } MetricsCommand::Submit(point, level) => { - log!(level, "{}", point); + log!(level, "{point}"); points.push(point); } MetricsCommand::SubmitCounter(counter, _level, bucket) => { - debug!("{:?}", counter); + debug!("{counter:?}"); let key = (counter.name, bucket); if let Some(value) = counters.get_mut(&key) { value.count += counter.count; @@ -351,11 +350,9 @@ impl MetricsAgent { debug_assert!( points.is_empty() && counters.is_empty(), - "Controlling `MetricsAgent` is expected to call `flush()` from the `Drop` \n\ - implementation, before exiting. So both `points` and `counters` must be empty at \n\ - this point.\n\ - `points`: {points:?}\n\ - `counters`: {counters:?}", + "Controlling `MetricsAgent` is expected to call `flush()` from the `Drop` \ + implementation, before exiting. So both `points` and `counters` must be empty at \ + this point. 
`points`: {points:?}, `counters`: {counters:?}", ); trace!("run: exit"); @@ -406,7 +403,7 @@ static HOST_ID: std::sync::LazyLock> = std::sync::LazyLock::new(| }); pub fn set_host_id(host_id: String) { - info!("host id: {}", host_id); + info!("host id: {host_id}"); *HOST_ID.write().unwrap() = host_id; } diff --git a/multinode-demo/bench-tps.sh b/multinode-demo/bench-tps.sh index 6350a2ea26d212..32f15810931fa0 100755 --- a/multinode-demo/bench-tps.sh +++ b/multinode-demo/bench-tps.sh @@ -21,7 +21,6 @@ usage() { args=("$@") default_arg --url "http://127.0.0.1:8899" -default_arg --entrypoint "127.0.0.1:8001" default_arg --faucet "127.0.0.1:9900" default_arg --duration 90 default_arg --tx-count 50000 diff --git a/net-shaper/Cargo.toml b/net-shaper/Cargo.toml deleted file mode 100644 index ff047038c29beb..00000000000000 --- a/net-shaper/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "solana-net-shaper" -description = "The solana cluster network shaping tool" -publish = false -version = { workspace = true } -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -edition = { workspace = true } - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "solana-net-shaper" -path = "src/main.rs" - -[dependencies] -clap = { version = "3.1.5", features = ["cargo"] } -rand = { workspace = true } -serde = { workspace = true } -serde_derive = { workspace = true } -serde_json = { workspace = true } -solana-logger = "=2.3.1" diff --git a/net-shaper/src/main.rs b/net-shaper/src/main.rs deleted file mode 100644 index 6443c777fdb367..00000000000000 --- a/net-shaper/src/main.rs +++ /dev/null @@ -1,620 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] -use { - clap::{crate_description, crate_name, crate_version, Arg, ArgMatches, Command}, - rand::{thread_rng, Rng}, - serde_derive::{Deserialize, Serialize}, - std::{fs, io, path::PathBuf}, -}; - -#[derive(Deserialize, 
Serialize, Debug)] -struct NetworkInterconnect { - pub a: u8, - pub b: u8, - pub config: String, -} - -#[derive(Deserialize, Serialize, Debug)] -struct NetworkTopology { - pub partitions: Vec, - pub interconnects: Vec, -} - -impl Default for NetworkTopology { - fn default() -> Self { - Self { - partitions: vec![100], - interconnects: vec![], - } - } -} - -impl NetworkTopology { - pub fn verify(&self) -> bool { - let sum: u8 = self.partitions.iter().sum(); - if sum != 100 { - return false; - } - - for x in self.interconnects.iter() { - if x.a as usize > self.partitions.len() || x.b as usize > self.partitions.len() { - return false; - } - } - - true - } - - pub fn new_from_stdin() -> Self { - let mut input = String::new(); - println!("Configure partition map (must add up to 100, e.g. [70, 20, 10]):"); - let partitions_str = match io::stdin().read_line(&mut input) { - Ok(_) => input, - Err(error) => panic!("error: {error}"), - }; - - let partitions: Vec = serde_json::from_str(&partitions_str) - .expect("Failed to parse input. 
It must be a JSON string"); - - let mut interconnects: Vec = vec![]; - - for i in 0..partitions.len() - 1 { - for j in i + 1..partitions.len() { - println!("Configure interconnect ({i} <-> {j}):"); - let mut input = String::new(); - let mut interconnect_config = match io::stdin().read_line(&mut input) { - Ok(_) => input, - Err(error) => panic!("error: {error}"), - }; - - if interconnect_config.ends_with('\n') { - interconnect_config.pop(); - if interconnect_config.ends_with('\r') { - interconnect_config.pop(); - } - } - - if !interconnect_config.is_empty() { - let interconnect = NetworkInterconnect { - a: i as u8, - b: j as u8, - config: interconnect_config.clone(), - }; - interconnects.push(interconnect); - let interconnect = NetworkInterconnect { - a: j as u8, - b: i as u8, - config: interconnect_config, - }; - interconnects.push(interconnect); - } - } - } - - Self { - partitions, - interconnects, - } - } - - fn new_random(max_partitions: usize, max_packet_drop: u8, max_packet_delay: u32) -> Self { - let mut rng = thread_rng(); - let num_partitions = rng.gen_range(0..max_partitions + 1); - - if num_partitions == 0 { - return NetworkTopology::default(); - } - - let mut partitions = vec![]; - let mut used_partition = 0; - for i in 0..num_partitions { - let partition = if i == num_partitions - 1 { - 100 - used_partition - } else { - rng.gen_range(0..100 - used_partition - num_partitions + i) - }; - used_partition += partition; - partitions.push(partition as u8); - } - - let mut interconnects: Vec = vec![]; - for i in 0..partitions.len() - 1 { - for j in i + 1..partitions.len() { - let drop_config = if max_packet_drop > 0 { - let packet_drop = rng.gen_range(0..max_packet_drop + 1); - format!("loss {packet_drop}% 25% ") - } else { - String::default() - }; - - let config = if max_packet_delay > 0 { - let packet_delay = rng.gen_range(0..max_packet_delay + 1); - format!("{drop_config}delay {packet_delay}ms 10ms") - } else { - drop_config - }; - - let interconnect = 
NetworkInterconnect { - a: i as u8, - b: j as u8, - config: config.clone(), - }; - interconnects.push(interconnect); - let interconnect = NetworkInterconnect { - a: j as u8, - b: i as u8, - config, - }; - interconnects.push(interconnect); - } - } - Self { - partitions, - interconnects, - } - } -} - -fn run( - cmd: &str, - args: &[&str], - launch_err_msg: &str, - status_err_msg: &str, - ignore_err: bool, -) -> bool { - println!("Running {:?}", std::process::Command::new(cmd).args(args)); - let output = std::process::Command::new(cmd) - .args(args) - .output() - .expect(launch_err_msg); - - if ignore_err { - return true; - } - - if !output.status.success() { - eprintln!( - "{} command failed with exit code: {}", - status_err_msg, output.status - ); - use std::str::from_utf8; - println!("stdout: {}", from_utf8(&output.stdout).unwrap_or("?")); - println!("stderr: {}", from_utf8(&output.stderr).unwrap_or("?")); - false - } else { - true - } -} - -fn insert_iptables_rule(tos: u8) -> bool { - let my_tos = tos.to_string(); - - // iptables -t mangle -A PREROUTING -p udp -j TOS --set-tos - run( - "iptables", - &[ - "-t", - "mangle", - "-A", - "OUTPUT", - "-p", - "udp", - "-j", - "TOS", - "--set-tos", - my_tos.as_str(), - ], - "Failed to add iptables rule", - "iptables", - false, - ) -} - -fn flush_iptables_rule() { - run( - "iptables", - &["-F", "-t", "mangle"], - "Failed to flush iptables", - "iptables flush", - true, - ); -} - -fn setup_ifb(interface: &str) -> bool { - // modprobe ifb numifbs=1 - run( - "modprobe", - &[ - "ifb", "numifbs=1", - ], - "Failed to load ifb module", - "modprobe ifb numifbs=1", - false - ) && - // ip link set dev ifb0 up - run( - "ip", - &[ - "link", "set", "dev", "ifb0", "up" - ], - "Failed to bring ifb0 online", - "ip link set dev ifb0 up", - false - ) && - // tc qdisc add dev handle ffff: ingress - run( - "tc", - &[ - "qdisc", "add", "dev", interface, "handle", "ffff:", "ingress" - ], - "Failed to setup ingress qdisc", - "tc qdisc add dev 
handle ffff: ingress", - false - ) - && - // tc filter add dev parent ffff: protocol ip u32 match u32 0 0 action mirred egress redirect dev ifb0 - run( - "tc", - &[ - "filter", "add", "dev", interface, "parent", "ffff:", "protocol", "ip", "u32", "match", "u32", "0", "0", "action", "mirred", "egress", "redirect", "dev", "ifb0" - ], - "Failed to redirect ingress traffc", - "tc filter add dev parent ffff: protocol ip u32 match u32 0 0 action mirred egress redirect dev ifb0", - false - ) -} - -fn delete_ifb(interface: &str) -> bool { - run( - "tc", - &[ - "qdisc", "delete", "dev", interface, "handle", "ffff:", "ingress", - ], - "Failed to setup ingress qdisc", - "tc qdisc delete dev handle ffff: ingress", - true, - ) && run( - "modprobe", - &["ifb", "--remove"], - "Failed to delete ifb module", - "modprobe ifb --remove", - true, - ) -} - -fn insert_tc_ifb_root(num_bands: &str) -> bool { - // tc qdisc add dev ifb0 root handle 1: prio bands - run( - "tc", - &[ - "qdisc", "add", "dev", "ifb0", "root", "handle", "1:", "prio", "bands", num_bands, - ], - "Failed to add root ifb qdisc", - "tc qdisc add dev ifb0 root handle 1: prio bands ", - false, - ) -} - -fn insert_tc_ifb_netem(class: &str, handle: &str, filter: &str) -> bool { - let mut filters: Vec<&str> = filter.split(' ').collect(); - let mut args = vec![ - "qdisc", "add", "dev", "ifb0", "parent", class, "handle", handle, "netem", - ]; - args.append(&mut filters); - // tc qdisc add dev ifb0 parent handle netem - run("tc", &args, "Failed to add tc child", "tc add child", false) -} - -fn insert_tos_ifb_filter(class: &str, tos: &str) -> bool { - // tc filter add dev ifb0 protocol ip parent 1: prio 1 u32 match ip tos 0xff flowid - run( - "tc", - &[ - "filter", "add", "dev", "ifb0", "protocol", "ip", "parent", "1:", "prio", "1", - "u32", "match", "ip", "tos", tos, "0xff", "flowid", class, - ], - "Failed to add tos filter", - "tc filter add dev ifb0 protocol ip parent 1: prio 1 u32 match ip tos 0xff flowid ", - false, - ) -} 
- -fn insert_default_ifb_filter(class: &str) -> bool { - // tc filter add dev ifb0 parent 1: protocol all prio 2 u32 match u32 0 0 flowid 1: - run( - "tc", - &[ - "filter", "add", "dev", "ifb0", "parent", "1:", "protocol", "all", "prio", "2", "u32", - "match", "u32", "0", "0", "flowid", class, - ], - "Failed to add catch-all filter", - "tc filter add dev ifb0 parent 1: protocol all prio 2 u32 match u32 0 0 flowid 1:", - false, - ) -} - -fn delete_all_filters(interface: &str) { - // tc filter delete dev - run( - "tc", - &["filter", "delete", "dev", interface], - "Failed to delete all filters", - "tc delete all filters", - true, - ); -} - -fn identify_my_partition(partitions: &[u8], index: u64, size: u64) -> usize { - let mut my_partition = 0; - let mut watermark = 0; - for (i, p) in partitions.iter().enumerate() { - watermark += *p; - if u64::from(watermark) >= index * 100 / size { - my_partition = i; - break; - } - } - - my_partition -} - -fn partition_id_to_tos(partition: usize) -> u8 { - if partition < 4 { - 2u8.pow(partition as u32 + 1) - } else { - 0 - } -} - -fn shape_network(matches: &ArgMatches) { - let config_path = PathBuf::from(matches.value_of_t_or_exit::("file")); - let config = fs::read_to_string(config_path).expect("Unable to read config file"); - let topology: NetworkTopology = - serde_json::from_str(&config).expect("Failed to parse log as JSON"); - let interface: String = matches.value_of_t_or_exit("iface"); - let network_size: u64 = matches.value_of_t_or_exit("size"); - let my_index: u64 = matches.value_of_t_or_exit("position"); - if !shape_network_steps(&topology, &interface, network_size, my_index) { - delete_ifb(interface.as_str()); - flush_iptables_rule(); - } -} - -fn shape_network_steps( - topology: &NetworkTopology, - interface: &str, - network_size: u64, - my_index: u64, -) -> bool { - // Integrity checks - assert!(topology.verify(), "Failed to verify the configuration file"); - assert!(my_index < network_size); - - // Figure out partition 
we belong in - let my_partition = identify_my_partition(&topology.partitions, my_index + 1, network_size); - - // Clear any lingering state - println!( - "my_index: {}, network_size: {}, partitions: {:?}", - my_index, network_size, topology.partitions - ); - println!("My partition is {my_partition}"); - - cleanup_network(interface); - - // Mark egress packets with our partition id - if !insert_iptables_rule(partition_id_to_tos(my_partition)) { - return false; - } - - let num_bands = topology.partitions.len() + 1; - let default_filter_class = format!("1:{num_bands}"); - if !topology.interconnects.is_empty() { - let num_bands_str = num_bands.to_string(); - // Redirect ingress traffic to the virtual interface ifb0 so we can - // apply egress rules - if !setup_ifb(interface) - // Setup root qdisc on ifb0 - || !insert_tc_ifb_root(num_bands_str.as_str()) - // Catch all so regular traffic/traffic within the same partition - // is not filtered out - || !insert_default_ifb_filter(default_filter_class.as_str()) - { - return false; - } - } - - println!("Setting up interconnects"); - for i in &topology.interconnects { - if i.b as usize == my_partition { - println!("interconnects: {i:#?}"); - let tos = partition_id_to_tos(i.a as usize); - if tos == 0 { - println!("Incorrect value of TOS/Partition in config {}", i.a); - return false; - } - let tos_string = tos.to_string(); - // First valid class is 1:1 - let class = format!("1:{}", i.a + 1); - if !insert_tc_ifb_netem(class.as_str(), tos_string.as_str(), i.config.as_str()) { - return false; - } - - if !insert_tos_ifb_filter(class.as_str(), tos_string.as_str()) { - return false; - } - } - } - - true -} - -fn parse_interface(interfaces: &str) -> &str { - for line in interfaces.lines() { - if line != "ifb0" { - return line; - } - } - - panic!("No valid interfaces"); -} - -fn cleanup_network(interface: &str) { - delete_all_filters("ifb0"); - delete_ifb(interface); - flush_iptables_rule(); -} - -fn configure(matches: &ArgMatches) { - 
let config = if !matches.is_present("random") { - NetworkTopology::new_from_stdin() - } else { - let max_partitions: usize = matches.value_of_t("max-partitions").unwrap_or(4); - let max_drop: u8 = matches.value_of_t("max-drop").unwrap_or(100); - let max_delay: u32 = matches.value_of_t("max-delay").unwrap_or(50); - NetworkTopology::new_random(max_partitions, max_drop, max_delay) - }; - - assert!(config.verify(), "Failed to verify the configuration"); - - let topology = serde_json::to_string(&config).expect("Failed to write as JSON"); - - println!("{topology}"); -} - -fn main() { - solana_logger::setup(); - - let matches = Command::new(crate_name!()) - .about(crate_description!()) - .version(crate_version!()) - .subcommand( - Command::new("shape") - .about("Shape the network using config file") - .arg( - Arg::new("file") - .short('f') - .long("file") - .value_name("config file") - .takes_value(true) - .required(true) - .help("Location of the network config file"), - ) - .arg( - Arg::new("size") - .short('s') - .long("size") - .value_name("network size") - .takes_value(true) - .required(true) - .help("Number of nodes in the network"), - ) - .arg( - Arg::new("iface") - .short('i') - .long("iface") - .value_name("network interface name") - .takes_value(true) - .required(true) - .help("Name of network interface"), - ) - .arg( - Arg::new("position") - .short('p') - .long("position") - .value_name("position of node") - .takes_value(true) - .required(true) - .help("Position of current node in the network"), - ), - ) - .subcommand( - Command::new("cleanup") - .about("Remove the network filters using config file") - .arg( - Arg::new("file") - .short('f') - .long("file") - .value_name("config file") - .takes_value(true) - .required(true) - .help("Location of the network config file"), - ) - .arg( - Arg::new("size") - .short('s') - .long("size") - .value_name("network size") - .takes_value(true) - .required(true) - .help("Number of nodes in the network"), - ) - .arg( - 
Arg::new("iface") - .short('i') - .long("iface") - .value_name("network interface name") - .takes_value(true) - .required(true) - .help("Name of network interface"), - ) - .arg( - Arg::new("position") - .short('p') - .long("position") - .value_name("position of node") - .takes_value(true) - .required(true) - .help("Position of current node in the network"), - ), - ) - .subcommand( - Command::new("configure") - .about("Generate a config file") - .arg( - Arg::new("random") - .short('r') - .long("random") - .required(false) - .help("Generate a random config file"), - ) - .arg( - Arg::new("max-partitions") - .short('p') - .long("max-partitions") - .value_name("count") - .takes_value(true) - .required(false) - .help("Maximum number of partitions. Used only with random configuration generation"), - ) - .arg( - Arg::new("max-drop") - .short('d') - .long("max-drop") - .value_name("percentage") - .takes_value(true) - .required(false) - .help("Maximum amount of packet drop. Used only with random configuration generation"), - ) - .arg( - Arg::new("max-delay") - .short('y') - .long("max-delay") - .value_name("ms") - .takes_value(true) - .required(false) - .help("Maximum amount of packet delay. 
Used only with random configuration generation"), - ), - ) - .get_matches(); - - match matches.subcommand() { - Some(("shape", args_matches)) => shape_network(args_matches), - Some(("cleanup", args_matches)) => { - let interfaces: String = args_matches.value_of_t_or_exit("iface"); - let iface = parse_interface(&interfaces); - cleanup_network(iface) - } - Some(("configure", args_matches)) => configure(args_matches), - _ => {} - }; -} diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml index bf2fd6e47262a0..52ae14e973d7fd 100644 --- a/net-utils/Cargo.toml +++ b/net-utils/Cargo.toml @@ -16,13 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] name = "solana_net_utils" [features] +agave-unstable-api = [] default = [] dev-context-only-utils = ["dep:pcap-file", "dep:hxdmp"] +shuttle-test = ["dep:shuttle", "solana-svm-type-overrides/shuttle-test"] [dependencies] anyhow = { workspace = true } bincode = { workspace = true } bytes = { workspace = true } +cfg-if = { workspace = true } +dashmap = { workspace = true, features = ["raw-api"] } hxdmp = { version = "0.2.1", optional = true } itertools = { workspace = true } log = { workspace = true } @@ -31,8 +35,10 @@ pcap-file = { version = "2.0.0", optional = true } rand = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } +shuttle = { workspace = true, optional = true } socket2 = { workspace = true } solana-serde = { workspace = true } +solana-svm-type-overrides = { workspace = true } tokio = { workspace = true, features = ["full"] } url = { workspace = true } @@ -41,3 +47,7 @@ solana-logger = { workspace = true } [lints] workspace = true + +[[bench]] +name = "token_bucket" +harness = false diff --git a/net-utils/benches/token_bucket.rs b/net-utils/benches/token_bucket.rs new file mode 100644 index 00000000000000..803a373e6eb15f --- /dev/null +++ b/net-utils/benches/token_bucket.rs @@ -0,0 +1,177 @@ +#![allow(clippy::arithmetic_side_effects)] +use { + solana_net_utils::token_bucket::*, + 
std::{ + net::{IpAddr, Ipv4Addr}, + sync::atomic::{AtomicUsize, Ordering}, + time::{Duration, Instant}, + }, +}; + +fn bench_token_bucket() { + println!("Running bench_token_bucket..."); + let run_duration = Duration::from_secs(5); + let fill_rate = 10000.0; + let request_size = 3; + let target_rate = fill_rate / request_size as f64; + let tb = TokenBucket::new(1, 600, fill_rate); + + let accepted = AtomicUsize::new(0); + let rejected = AtomicUsize::new(0); + + let start = Instant::now(); + let workers = 8; + + std::thread::scope(|scope| { + for _ in 0..workers { + scope.spawn(|| loop { + if start.elapsed() > run_duration { + break; + } + match tb.consume_tokens(request_size) { + Ok(_) => accepted.fetch_add(1, Ordering::Relaxed), + Err(_) => rejected.fetch_add(1, Ordering::Relaxed), + }; + }); + } + // periodically check for races + let jh = scope.spawn(|| loop { + std::thread::sleep(Duration::from_millis(100)); + let elapsed = start.elapsed(); + if elapsed > run_duration { + break; + } + let acc = accepted.load(Ordering::Relaxed); + let rate = acc as f64 / elapsed.as_secs_f64(); + assert!( + tb.current_tokens() < request_size * 2, + "bucket should have no spare tokens" + ); + assert!( + // allow 1% error + (rate - target_rate).abs() < target_rate / 100.0, + "Accepted rate should be about {target_rate}, actual {rate}" + ); + }); + jh.join().expect("Rate checks should pass"); + }); + + let acc = accepted.load(Ordering::Relaxed); + let rej = rejected.load(Ordering::Relaxed); + println!("Run complete over {:?} seconds", run_duration.as_secs()); + println!("Accepted {acc}, Rejected: {rej}"); + println!( + "processed {} requests, {} per second", + acc + rej, + (acc + rej) as f32 / run_duration.as_secs_f32() + ); +} + +fn bench_token_bucket_eviction() { + println!("Running bench_token_bucket_eviction..."); + let run_duration = Duration::from_secs(5); + let target_size = 256; + let tb = TokenBucket::new(1, 60, 100.0); + let mut limiter = KeyedRateLimiter::new(target_size, 
tb, 8); + // make shrinking more aggressive than default + // since only one worker is shrinking the + // datastructure at any given moment so we do not flake this test + // too hard + limiter.set_shrink_interval(32); + + let accepted = AtomicUsize::new(0); + let rejected = AtomicUsize::new(0); + + let start = Instant::now(); + let ip_pool = 1024; + let workers = 8; + + let max_size = AtomicUsize::new(0); + std::thread::scope(|scope| { + for _ in 0..workers { + scope.spawn(|| { + for i in 1.. { + if Instant::now() > start + run_duration { + break; + } + let ip = IpAddr::V4(Ipv4Addr::from_bits(i % ip_pool as u32)); + if limiter.consume_tokens(ip, 1).is_ok() { + accepted.fetch_add(1, Ordering::Relaxed); + } else { + rejected.fetch_add(1, Ordering::Relaxed); + } + let len_approx = limiter.len_approx(); + max_size.fetch_max(len_approx, Ordering::Relaxed); + } + }); + } + }); + + let acc = accepted.load(Ordering::Relaxed); + let rej = rejected.load(Ordering::Relaxed); + println!("Run complete over {:?} seconds", run_duration.as_secs()); + eprintln!("Max observed size was {}", max_size.load(Ordering::Relaxed)); + assert!( + max_size.load(Ordering::Relaxed) <= target_size * 2, + "Max target size should never be exceeded" + ); + println!( + "processed {} requests, {} per second", + acc + rej, + (acc + rej) as f32 / run_duration.as_secs_f32() + ); + println!("Rejected: {rej}"); +} + +fn bench_keyed_rate_limiter() { + println!("Running bench_keyed_rate_limiter..."); + let run_duration = Duration::from_secs(5); + let tb = TokenBucket::new(1, 60, 100.0); + let limiter = KeyedRateLimiter::new(2048, tb, 8); + + let accepted = AtomicUsize::new(0); + let rejected = AtomicUsize::new(0); + + let start = Instant::now(); + let ip_pool = 2048; + let expected_total_accepts = (run_duration.as_secs() * 100 * ip_pool) as i64; + let workers = 32; + + std::thread::scope(|scope| { + for _ in 0..workers { + scope.spawn(|| { + for i in 1.. 
{ + if Instant::now() > start + run_duration { + break; + } + let ip = IpAddr::V4(Ipv4Addr::from_bits(i % ip_pool as u32)); + if limiter.consume_tokens(ip, 1).is_ok() { + accepted.fetch_add(1, Ordering::Relaxed); + } else { + rejected.fetch_add(1, Ordering::Relaxed); + } + } + }); + } + }); + + let acc = accepted.load(Ordering::Relaxed); + let rej = rejected.load(Ordering::Relaxed); + println!("Run complete over {:?} seconds", run_duration.as_secs()); + println!("Accepted: {acc} (target {expected_total_accepts})"); + println!("Rejected: {rej}"); + println!( + "processed {} requests, {} per second", + acc + rej, + (acc + rej) as f32 / run_duration.as_secs_f32() + ); + assert!(((acc as i64) - expected_total_accepts).abs() < expected_total_accepts / 10); +} + +fn main() { + bench_token_bucket(); + println!("=========="); + bench_token_bucket_eviction(); + println!("=========="); + bench_keyed_rate_limiter(); +} diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs index 1b223362a0f3e3..b3c9205909ade8 100644 --- a/net-utils/src/lib.rs +++ b/net-utils/src/lib.rs @@ -12,7 +12,9 @@ mod ip_echo_client; mod ip_echo_server; +pub mod multihomed_sockets; pub mod sockets; +pub mod token_bucket; #[cfg(feature = "dev-context-only-utils")] pub mod tooling_for_tests; @@ -44,8 +46,19 @@ pub struct UdpSocketPair { pub type PortRange = (u16, u16); +#[cfg(not(debug_assertions))] +/// Port range available to validator by default pub const VALIDATOR_PORT_RANGE: PortRange = (8000, 10_000); -pub const MINIMUM_VALIDATOR_PORT_RANGE_WIDTH: u16 = 17; // VALIDATOR_PORT_RANGE must be at least this wide + +// Sets the port range outside of the region used by other tests to avoid interference +// This arrangement is not ideal, but can be removed once ConnectionCache is deprecated +#[cfg(debug_assertions)] +pub const VALIDATOR_PORT_RANGE: PortRange = ( + crate::sockets::UNIQUE_ALLOC_BASE_PORT - 512, + crate::sockets::UNIQUE_ALLOC_BASE_PORT, +); + +pub const MINIMUM_VALIDATOR_PORT_RANGE_WIDTH: 
u16 = 25; // VALIDATOR_PORT_RANGE must be at least this wide pub(crate) const HEADER_LENGTH: usize = 4; pub(crate) const IP_ECHO_SERVER_RESPONSE_LENGTH: usize = HEADER_LENGTH + 23; @@ -240,7 +253,7 @@ pub fn is_host_port(string: String) -> Result<(), String> { } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please use the equivalent struct from solana-net-utils::sockets" )] #[derive(Clone, Copy, Debug, Default)] @@ -281,7 +294,7 @@ impl SocketConfig { } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please use the equivalent from solana-net-utils::sockets" )] #[allow(deprecated)] @@ -308,7 +321,7 @@ pub fn bind_in_range(ip_addr: IpAddr, range: PortRange) -> io::Result<(u16, UdpS } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please use the equivalent from solana-net-utils::sockets" )] #[allow(deprecated)] @@ -334,7 +347,7 @@ pub fn bind_in_range_with_config( } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please use the equivalent from solana-net-utils::sockets" )] #[allow(deprecated)] @@ -352,7 +365,7 @@ pub fn bind_with_any_port_with_config( } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please use the equivalent from solana-net-utils::sockets" )] #[allow(deprecated)] @@ -377,7 +390,7 @@ pub fn multi_bind_in_range_with_config( } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please use the eqiuvalent from solana-net-utils::sockets" )] #[allow(deprecated)] @@ -400,7 +413,7 @@ pub fn bind_to_unspecified() -> io::Result { } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please avoid this function in favor of sockets::bind_to_with_config" )] #[allow(deprecated)] @@ -415,7 +428,7 @@ pub fn bind_to_with_config( } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please avoid this function, it is easy to misuse" )] #[allow(deprecated)] @@ -434,7 +447,7 @@ pub fn bind_to_with_config_non_blocking( } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please 
avoid this function in favor of sockets::bind_common_with_config" )] /// binds both a UdpSocket and a TcpListener @@ -444,7 +457,7 @@ pub fn bind_common(ip_addr: IpAddr, port: u16) -> io::Result<(UdpSocket, TcpList } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please avoid this function in favor of sockets::bind_common_with_config" )] #[allow(deprecated)] @@ -463,7 +476,7 @@ pub fn bind_common_with_config( } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please avoid this function, in favor of \ sockets::bind_two_in_range_with_offset_and_config" )] @@ -484,7 +497,7 @@ pub fn bind_two_in_range_with_offset( } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please avoid this function, in favor of \ sockets::bind_two_in_range_with_offset_and_config" )] @@ -537,7 +550,7 @@ pub fn find_available_port_in_range(ip_addr: IpAddr, range: PortRange) -> io::Re /// Searches for several ports on a given binding ip_addr in the provided range. /// -/// This will start at a random point in the range provided, and search sequencially. +/// This will start at a random point in the range provided, and search sequentially. /// If it can not find anything, an Error is returned. 
pub fn find_available_ports_in_range( ip_addr: IpAddr, @@ -572,7 +585,7 @@ pub fn find_available_ports_in_range( } #[deprecated( - since = "2.3.2", + since = "3.0.0", note = "Please avoid this function, in favor of sockets::bind_more_with_config" )] #[allow(deprecated)] @@ -599,22 +612,12 @@ pub fn bind_more_with_config( } #[cfg(test)] -#[allow(deprecated)] mod tests { use { - super::*, - ip_echo_server::IpEchoServerResponse, - itertools::Itertools, - std::{net::Ipv4Addr, time::Duration}, - tokio::runtime::Runtime, + super::*, crate::sockets::unique_port_range_for_tests, + ip_echo_server::IpEchoServerResponse, itertools::Itertools, std::net::Ipv4Addr, }; - fn runtime() -> Runtime { - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .expect("Can not create a runtime") - } #[test] fn test_response_length() { let resp = IpEchoServerResponse { @@ -709,53 +712,11 @@ mod tests { assert!(is_host_port("localhost".to_string()).is_err()); } - #[test] - fn test_bind() { - let (pr_s, pr_e) = sockets::localhost_port_range_for_tests(); - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - let s = bind_in_range(ip_addr, (pr_s, pr_e)).unwrap(); - assert_eq!(s.0, pr_s, "bind_in_range should use first available port"); - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - let config = SocketConfig::default().reuseport(true); - let x = bind_to_with_config(ip_addr, pr_s + 1, config).unwrap(); - let y = bind_to_with_config(ip_addr, pr_s + 1, config).unwrap(); - assert_eq!( - x.local_addr().unwrap().port(), - y.local_addr().unwrap().port() - ); - bind_to(ip_addr, pr_s, false).unwrap_err(); - bind_in_range(ip_addr, (pr_s, pr_s + 2)).unwrap_err(); - - let (port, v) = - multi_bind_in_range_with_config(ip_addr, (pr_s + 5, pr_e), config, 10).unwrap(); - for sock in &v { - assert_eq!(port, sock.local_addr().unwrap().port()); - } - } - - #[test] - fn test_bind_with_any_port() { - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - let config = SocketConfig::default(); - 
let x = bind_with_any_port_with_config(ip_addr, config).unwrap(); - let y = bind_with_any_port_with_config(ip_addr, config).unwrap(); - assert_ne!( - x.local_addr().unwrap().port(), - y.local_addr().unwrap().port() - ); - } - - #[test] - fn test_bind_in_range_nil() { - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - bind_in_range(ip_addr, (2000, 2000)).unwrap_err(); - bind_in_range(ip_addr, (2000, 1999)).unwrap_err(); - } - #[test] fn test_find_available_port_in_range() { let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); - let (pr_s, pr_e) = sockets::localhost_port_range_for_tests(); + let range = sockets::unique_port_range_for_tests(4); + let (pr_s, pr_e) = (range.start, range.end); assert_eq!( find_available_port_in_range(ip_addr, (pr_s, pr_s + 1)).unwrap(), pr_s @@ -763,7 +724,7 @@ mod tests { let port = find_available_port_in_range(ip_addr, (pr_s, pr_e)).unwrap(); assert!((pr_s..pr_e).contains(&port)); - let _socket = bind_to(ip_addr, port, false).unwrap(); + let _socket = sockets::bind_to(ip_addr, port).unwrap(); find_available_port_in_range(ip_addr, (port, port + 1)).unwrap_err(); } @@ -773,7 +734,12 @@ mod tests { let port_range = sockets::localhost_port_range_for_tests(); assert!(port_range.1 - port_range.0 > 16); // reserve 1 port to make it non-trivial - let sock = bind_to_with_config(ip_addr, port_range.0 + 2, SocketConfig::default()).unwrap(); + let sock = sockets::bind_to_with_config( + ip_addr, + port_range.0 + 2, + sockets::SocketConfiguration::default(), + ) + .unwrap(); let ports: [u16; 15] = find_available_ports_in_range(ip_addr, port_range).unwrap(); let mut ports_vec = Vec::from(ports); ports_vec.push(sock.local_addr().unwrap().port()); @@ -781,253 +747,19 @@ mod tests { assert_eq!(res.len(), 16, "Should reserve 16 unique ports"); } - #[test] - fn test_bind_common_in_range() { - let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); - let (pr_s, pr_e) = sockets::localhost_port_range_for_tests(); - let config = SocketConfig::default(); - let (port, 
_sockets) = - bind_common_in_range_with_config(ip_addr, (pr_s, pr_e), config).unwrap(); - assert!((pr_s..pr_e).contains(&port)); - - bind_common_in_range_with_config(ip_addr, (port, port + 1), config).unwrap_err(); - } - - #[test] - fn test_get_public_ip_addr_none() { - solana_logger::setup(); - let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); - let (pr_s, pr_e) = sockets::localhost_port_range_for_tests(); - let config = SocketConfig::default(); - let (_server_port, (server_udp_socket, server_tcp_listener)) = - bind_common_in_range_with_config(ip_addr, (pr_s, pr_e), config).unwrap(); - - let _runtime = ip_echo_server( - server_tcp_listener, - DEFAULT_IP_ECHO_SERVER_THREADS, - /*shred_version=*/ Some(42), - ); - - let server_ip_echo_addr = server_udp_socket.local_addr().unwrap(); - assert_eq!( - get_public_ip_addr_with_binding( - &server_ip_echo_addr, - IpAddr::V4(Ipv4Addr::UNSPECIFIED) - ) - .unwrap(), - parse_host("127.0.0.1").unwrap(), - ); - assert_eq!(get_cluster_shred_version(&server_ip_echo_addr).unwrap(), 42); - assert!(verify_all_reachable_tcp(&server_ip_echo_addr, vec![],)); - assert!(verify_all_reachable_udp(&server_ip_echo_addr, &[],)); - } - - #[test] - fn test_get_public_ip_addr_reachable() { - solana_logger::setup(); - let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); - let port_range = sockets::localhost_port_range_for_tests(); - let config = SocketConfig::default(); - let (_server_port, (server_udp_socket, server_tcp_listener)) = - bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); - let (_client_port, (client_udp_socket, client_tcp_listener)) = - bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); - - let _runtime = ip_echo_server( - server_tcp_listener, - DEFAULT_IP_ECHO_SERVER_THREADS, - /*shred_version=*/ Some(65535), - ); - - let ip_echo_server_addr = server_udp_socket.local_addr().unwrap(); - assert_eq!( - get_public_ip_addr_with_binding( - &ip_echo_server_addr, - IpAddr::V4(Ipv4Addr::UNSPECIFIED) - ) - 
.unwrap(), - parse_host("127.0.0.1").unwrap(), - ); - assert_eq!( - get_cluster_shred_version(&ip_echo_server_addr).unwrap(), - 65535 - ); - assert!(verify_all_reachable_tcp( - &ip_echo_server_addr, - vec![client_tcp_listener], - )); - assert!(verify_all_reachable_udp( - &ip_echo_server_addr, - &[&client_udp_socket], - )); - } - - #[test] - fn test_verify_ports_tcp_unreachable() { - solana_logger::setup(); - let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); - let port_range = sockets::localhost_port_range_for_tests(); - let config = SocketConfig::default(); - let (_server_port, (server_udp_socket, _server_tcp_listener)) = - bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); - - // make the socket unreachable by not running the ip echo server! - let server_ip_echo_addr = server_udp_socket.local_addr().unwrap(); - - let (_, (_client_udp_socket, client_tcp_listener)) = - bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); - - let rt = runtime(); - assert!(!rt.block_on(ip_echo_client::verify_all_reachable_tcp( - server_ip_echo_addr, - vec![client_tcp_listener], - Duration::from_secs(2), - ))); - } - - #[test] - fn test_verify_ports_udp_unreachable() { - solana_logger::setup(); - let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); - let port_range = sockets::localhost_port_range_for_tests(); - let config = SocketConfig::default(); - let (_server_port, (server_udp_socket, _server_tcp_listener)) = - bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); - - // make the socket unreachable by not running the ip echo server! 
- let server_ip_echo_addr = server_udp_socket.local_addr().unwrap(); - - let (_correct_client_port, (client_udp_socket, _client_tcp_listener)) = - bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); - - let rt = runtime(); - assert!(!rt.block_on(ip_echo_client::verify_all_reachable_udp( - server_ip_echo_addr, - &[&client_udp_socket], - Duration::from_secs(2), - 3, - ))); - } - - #[test] - fn test_verify_many_ports_reachable() { - solana_logger::setup(); - let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); - let config = SocketConfig::default(); - let mut tcp_listeners = vec![]; - let mut udp_sockets = vec![]; - - let (_server_port, (_, server_tcp_listener)) = - bind_common_in_range_with_config(ip_addr, (2200, 2300), config).unwrap(); - for _ in 0..MAX_PORT_VERIFY_THREADS * 2 { - let (_client_port, (client_udp_socket, client_tcp_listener)) = - bind_common_in_range_with_config( - ip_addr, - (2300, 2300 + (MAX_PORT_VERIFY_THREADS * 3) as u16), - config, - ) - .unwrap(); - tcp_listeners.push(client_tcp_listener); - udp_sockets.push(client_udp_socket); - } - - let ip_echo_server_addr = server_tcp_listener.local_addr().unwrap(); - - let _runtime = ip_echo_server( - server_tcp_listener, - DEFAULT_IP_ECHO_SERVER_THREADS, - Some(65535), - ); - - assert_eq!( - get_public_ip_addr_with_binding( - &ip_echo_server_addr, - IpAddr::V4(Ipv4Addr::UNSPECIFIED) - ) - .unwrap(), - parse_host("127.0.0.1").unwrap(), - ); - - let socket_refs = udp_sockets.iter().collect_vec(); - assert!(verify_all_reachable_tcp( - &ip_echo_server_addr, - tcp_listeners, - )); - assert!(verify_all_reachable_udp(&ip_echo_server_addr, &socket_refs)); - } - - #[test] - fn test_bind_two_in_range_with_offset() { - solana_logger::setup(); - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - let offset = 6; - if let Ok(((port1, _), (port2, _))) = - bind_two_in_range_with_offset(ip_addr, (1024, 65535), offset) - { - assert!(port2 == port1 + offset); - } - let offset = 42; - if let Ok(((port1, _), 
(port2, _))) = - bind_two_in_range_with_offset(ip_addr, (1024, 65535), offset) - { - assert!(port2 == port1 + offset); - } - assert!(bind_two_in_range_with_offset(ip_addr, (1024, 1044), offset).is_err()); - } - + #[allow(deprecated)] #[test] fn test_multi_bind_in_range_with_config_reuseport_disabled() { let ip_addr: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); let config = SocketConfig::default(); //reuseport is false by default - let result = multi_bind_in_range_with_config(ip_addr, (2010, 2110), config, 2); + let port_range = unique_port_range_for_tests(3); + let result = + multi_bind_in_range_with_config(ip_addr, (port_range.start, port_range.end), config, 2); assert!( result.is_err(), "Expected an error when reuseport is not set to true" ); } - - #[test] - fn test_verify_udp_multiple_ips_reachable() { - solana_logger::setup(); - let config = SocketConfig::default(); - let ip_a = IpAddr::V4(Ipv4Addr::LOCALHOST); - let ip_b = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)); - - let server_ports = sockets::localhost_port_range_for_tests(); - let (_srv_udp_port, (srv_udp_sock, srv_tcp_listener)) = - bind_common_in_range_with_config(ip_a, server_ports, config).unwrap(); - - let ip_echo_server_addr = srv_udp_sock.local_addr().unwrap(); - let _runtime = ip_echo_server( - srv_tcp_listener, - DEFAULT_IP_ECHO_SERVER_THREADS, - /*shred_version=*/ Some(42), - ); - - let mut udp_sockets = Vec::new(); - let (_p1, (sock_a, _tl_a)) = bind_common_in_range_with_config( - ip_a, - sockets::localhost_port_range_for_tests(), - config, - ) - .unwrap(); - let (_p2, (sock_b, _tl_b)) = bind_common_in_range_with_config( - ip_b, - sockets::localhost_port_range_for_tests(), - config, - ) - .unwrap(); - - udp_sockets.push(sock_a); - udp_sockets.push(sock_b); - - let socket_refs: Vec<&UdpSocket> = udp_sockets.iter().collect(); - - assert!( - verify_all_reachable_udp(&ip_echo_server_addr, &socket_refs), - "all UDP ports on both 127.0.0.1 and 127.0.0.2 should be reachable" - ); - } } diff --git 
a/net-utils/src/multihomed_sockets.rs b/net-utils/src/multihomed_sockets.rs new file mode 100644 index 00000000000000..3162e65685d76f --- /dev/null +++ b/net-utils/src/multihomed_sockets.rs @@ -0,0 +1,155 @@ +#![cfg(feature = "agave-unstable-api")] +use std::{ + net::{IpAddr, Ipv4Addr, UdpSocket}, + ops::Deref, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; + +pub enum CurrentSocket<'a> { + Same(&'a UdpSocket), + Changed(&'a UdpSocket), +} + +pub trait SocketProvider { + fn current_socket(&self) -> CurrentSocket<'_>; + + #[inline] + fn current_socket_ref(&self) -> &UdpSocket { + match self.current_socket() { + CurrentSocket::Same(sock) | CurrentSocket::Changed(sock) => sock, + } + } +} + +/// Fixed UDP Socket -> default +pub struct FixedSocketProvider { + socket: Arc<UdpSocket>, +} +impl FixedSocketProvider { + pub fn new(socket: Arc<UdpSocket>) -> Self { + Self { socket } + } +} +impl SocketProvider for FixedSocketProvider { + #[inline] + fn current_socket(&self) -> CurrentSocket<'_> { + CurrentSocket::Same(self.socket.as_ref()) + } +} + +pub struct MultihomedSocketProvider { + sockets: Arc<[UdpSocket]>, + bind_ip_addrs: Arc<BindIpAddrs>, + last_index: AtomicUsize, +} + +impl MultihomedSocketProvider { + pub fn new(sockets: Arc<[UdpSocket]>, bind_ip_addrs: Arc<BindIpAddrs>) -> Self { + Self { + sockets, + bind_ip_addrs, + last_index: AtomicUsize::new(usize::MAX), + } + } +} + +impl SocketProvider for MultihomedSocketProvider { + #[inline] + fn current_socket(&self) -> CurrentSocket<'_> { + let idx = self.bind_ip_addrs.active_index(); + let last = self.last_index.swap(idx, Ordering::AcqRel); + + let sock = &self.sockets[idx]; + if last == idx { + CurrentSocket::Same(sock) + } else { + CurrentSocket::Changed(sock) + } + } +} + +#[derive(Debug, Clone)] +pub struct BindIpAddrs { + /// The IP addresses this node may bind to + /// Index 0 is the public internet address + /// Index 1+ are secondary addresses (i.e.
multihoming) + addrs: Vec<IpAddr>, + active_index: Arc<AtomicUsize>, +} + +impl Default for BindIpAddrs { + fn default() -> Self { + Self::new(vec![IpAddr::V4(Ipv4Addr::LOCALHOST)]).unwrap() + } +} + +impl BindIpAddrs { + pub fn new(addrs: Vec<IpAddr>) -> Result<Self, String> { + if addrs.is_empty() { + return Err( + "BindIpAddrs requires at least one IP address (--bind-address)".to_string(), + ); + } + if addrs.len() > 1 { + for ip in &addrs { + if ip.is_loopback() || ip.is_unspecified() || ip.is_multicast() { + return Err(format!( + "Invalid configuration: {ip:?} is not allowed with multiple \ + --bind-address values (loopback, unspecified, or multicast)" + )); + } + } + } + + Ok(Self { + addrs, + active_index: Arc::new(AtomicUsize::new(0)), + }) + } + + #[inline] + pub fn active(&self) -> IpAddr { + self.addrs[self.active_index.load(Ordering::Acquire)] + } + + /// Change active to index (0 = public internet IP, 1+ = secondary IPs) + pub fn set_active(&self, index: usize) -> Result<IpAddr, String> { + if index >= self.addrs.len() { + return Err(format!( + "Index {index} out of range, only {} IPs available", + self.addrs.len() + )); + } + self.active_index.store(index, Ordering::Release); + Ok(self.addrs[index]) + } + + #[inline] + pub fn active_index(&self) -> usize { + self.active_index.load(Ordering::Acquire) + } + + #[inline] + pub fn multihoming_enabled(&self) -> bool { + self.addrs.len() > 1 + } +} + +// Makes BindIpAddrs behave like &[IpAddr] +impl Deref for BindIpAddrs { + type Target = [IpAddr]; + + fn deref(&self) -> &Self::Target { + &self.addrs + } +} + +// For generic APIs expecting something like AsRef<[IpAddr]> +impl AsRef<[IpAddr]> for BindIpAddrs { + fn as_ref(&self) -> &[IpAddr] { + &self.addrs + } +} diff --git a/net-utils/src/sockets.rs b/net-utils/src/sockets.rs index 78b562574e0c7a..c42b2315e75083 100644 --- a/net-utils/src/sockets.rs +++ b/net-utils/src/sockets.rs @@ -12,10 +12,10 @@ use { }, }; // base port for deconflicted allocations -const BASE_PORT: u16 = 5000; +pub(crate) const
UNIQUE_ALLOC_BASE_PORT: u16 = 2000; // how much to allocate per individual process. // we expect to have at most 64 concurrent tests in CI at any moment on a given host. -const SLICE_PER_PROCESS: u16 = (u16::MAX - BASE_PORT) / 64; +const SLICE_PER_PROCESS: u16 = (u16::MAX - UNIQUE_ALLOC_BASE_PORT) / 64; /// When running under nextest, this will try to provide /// a unique slice of port numbers (assuming no other nextest processes /// are running on the same host) based on NEXTEST_TEST_GLOBAL_SLOT variable @@ -26,7 +26,7 @@ const SLICE_PER_PROCESS: u16 = (u16::MAX - BASE_PORT) / 64; #[allow(clippy::arithmetic_side_effects)] pub fn unique_port_range_for_tests(size: u16) -> Range<u16> { static SLICE: AtomicU16 = AtomicU16::new(0); - let offset = SLICE.fetch_add(size, Ordering::Relaxed); + let offset = SLICE.fetch_add(size, Ordering::SeqCst); let start = offset + match std::env::var("NEXTEST_TEST_GLOBAL_SLOT") { Ok(slot) => { @@ -36,15 +36,15 @@ pub fn unique_port_range_for_tests(size: u16) -> Range<u16> { "Overrunning into the port range of another test! Consider using fewer ports \ per test." ); - BASE_PORT + slot * SLICE_PER_PROCESS + UNIQUE_ALLOC_BASE_PORT + slot * SLICE_PER_PROCESS } - Err(_) => BASE_PORT, + Err(_) => UNIQUE_ALLOC_BASE_PORT, }; assert!(start < u16::MAX - size, "Ran out of port numbers!"); start..start + size } -/// Retrieve a free 20-port slice for unit tests +/// Retrieve a free 25-port slice for unit tests /// /// When running under nextest, this will try to provide /// a unique slice of port numbers (assuming no other nextest processes @@ -54,7 +54,7 @@ pub fn unique_port_range_for_tests(size: u16) -> Range<u16> { /// When running without nextest, this will only bump an atomic and eventually /// panic when it runs out of port numbers to assign.
pub fn localhost_port_range_for_tests() -> (u16, u16) { - let pr = unique_port_range_for_tests(20); + let pr = unique_port_range_for_tests(25); (pr.start, pr.end) } @@ -216,6 +216,7 @@ pub fn bind_in_range_with_config( ))) } +#[deprecated(since = "3.0.0", note = "Please bind to specific ports instead")] pub fn bind_with_any_port_with_config( ip_addr: IpAddr, config: SocketConfiguration, @@ -268,12 +269,14 @@ pub async fn bind_to_async(ip_addr: IpAddr, port: u16) -> io::Result io::Result { - bind_to_async(IpAddr::V4(Ipv4Addr::LOCALHOST), 0).await + let port = unique_port_range_for_tests(1).start; + bind_to_async(IpAddr::V4(Ipv4Addr::LOCALHOST), port).await } #[cfg(feature = "dev-context-only-utils")] pub async fn bind_to_unspecified_async() -> io::Result { - bind_to_async(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0).await + let port = unique_port_range_for_tests(1).start; + bind_to_async(IpAddr::V4(Ipv4Addr::UNSPECIFIED), port).await } pub fn bind_to_with_config( @@ -359,22 +362,36 @@ pub fn bind_more_with_config( } #[cfg(test)] -#[allow(deprecated)] mod tests { use { super::*, - crate::{bind_in_range, sockets::localhost_port_range_for_tests}, - std::net::Ipv4Addr, + crate::{ + bind_in_range, get_cluster_shred_version, get_public_ip_addr_with_binding, + ip_echo_client, ip_echo_server, parse_host, + sockets::{localhost_port_range_for_tests, unique_port_range_for_tests}, + verify_all_reachable_tcp, verify_all_reachable_udp, DEFAULT_IP_ECHO_SERVER_THREADS, + MAX_PORT_VERIFY_THREADS, + }, + itertools::Itertools, + std::{net::Ipv4Addr, time::Duration}, + tokio::runtime::Runtime, }; + fn runtime() -> Runtime { + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("Can not create a runtime") + } + #[test] fn test_bind() { let (pr_s, pr_e) = localhost_port_range_for_tests(); - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); let config = SocketConfiguration::default(); let s = bind_in_range(ip_addr, 
(pr_s, pr_e)).unwrap(); assert_eq!(s.0, pr_s, "bind_in_range should use first available port"); - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); let x = bind_to_with_config(ip_addr, pr_s + 1, config).unwrap(); let y = bind_more_with_config(x, 2, config).unwrap(); assert_eq!( @@ -393,10 +410,8 @@ mod tests { #[test] fn test_bind_with_any_port() { - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - let config = SocketConfiguration::default(); - let x = bind_with_any_port_with_config(ip_addr, config).unwrap(); - let y = bind_with_any_port_with_config(ip_addr, config).unwrap(); + let x = bind_to_localhost_unique().unwrap(); + let y = bind_to_localhost_unique().unwrap(); assert_ne!( x.local_addr().unwrap().port(), y.local_addr().unwrap().port() @@ -405,9 +420,10 @@ mod tests { #[test] fn test_bind_in_range_nil() { - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); - bind_in_range(ip_addr, (2000, 2000)).unwrap_err(); - bind_in_range(ip_addr, (2000, 1999)).unwrap_err(); + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); + let range = unique_port_range_for_tests(2); + bind_in_range(ip_addr, (range.end, range.end)).unwrap_err(); + bind_in_range(ip_addr, (range.end, range.start)).unwrap_err(); } #[test] @@ -424,12 +440,11 @@ mod tests { #[test] fn test_bind_common_in_range() { let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); - let (pr_s, pr_e) = localhost_port_range_for_tests(); + let range = unique_port_range_for_tests(5); let config = SocketConfiguration::default(); let (port, _sockets) = - bind_common_in_range_with_config(ip_addr, (pr_s, pr_e), config).unwrap(); - assert!((pr_s..pr_e).contains(&port)); - + bind_common_in_range_with_config(ip_addr, (range.start, range.end), config).unwrap(); + assert!(range.contains(&port)); bind_common_in_range_with_config(ip_addr, (port, port + 1), config).unwrap_err(); } @@ -437,26 +452,242 @@ mod tests { fn test_bind_two_in_range_with_offset() { solana_logger::setup(); let config = 
SocketConfiguration::default(); - let ip_addr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); let offset = 6; - if let Ok(((port1, _), (port2, _))) = - bind_two_in_range_with_offset_and_config(ip_addr, (1024, 65535), offset, config, config) - { + let port_range = unique_port_range_for_tests(10); + if let Ok(((port1, _), (port2, _))) = bind_two_in_range_with_offset_and_config( + ip_addr, + (port_range.start, port_range.end), + offset, + config, + config, + ) { assert!(port2 == port1 + offset); } let offset = 42; - if let Ok(((port1, _), (port2, _))) = - bind_two_in_range_with_offset_and_config(ip_addr, (1024, 65535), offset, config, config) - { + if let Ok(((port1, _), (port2, _))) = bind_two_in_range_with_offset_and_config( + ip_addr, + (port_range.start, port_range.end), + offset, + config, + config, + ) { assert!(port2 == port1 + offset); } assert!(bind_two_in_range_with_offset_and_config( ip_addr, - (1024, 1044), + (port_range.start, port_range.start + 5), offset, config, config ) .is_err()); } + + #[test] + fn test_get_public_ip_addr_none() { + solana_logger::setup(); + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); + let (pr_s, pr_e) = localhost_port_range_for_tests(); + let config = SocketConfiguration::default(); + let (_server_port, (server_udp_socket, server_tcp_listener)) = + bind_common_in_range_with_config(ip_addr, (pr_s, pr_e), config).unwrap(); + + let _runtime = ip_echo_server( + server_tcp_listener, + DEFAULT_IP_ECHO_SERVER_THREADS, + /*shred_version=*/ Some(42), + ); + + let server_ip_echo_addr = server_udp_socket.local_addr().unwrap(); + assert_eq!( + get_public_ip_addr_with_binding( + &server_ip_echo_addr, + IpAddr::V4(Ipv4Addr::UNSPECIFIED) + ) + .unwrap(), + parse_host("127.0.0.1").unwrap(), + ); + assert_eq!(get_cluster_shred_version(&server_ip_echo_addr).unwrap(), 42); + assert!(verify_all_reachable_tcp(&server_ip_echo_addr, vec![],)); + assert!(verify_all_reachable_udp(&server_ip_echo_addr, &[],)); + } + 
+ #[test] + fn test_get_public_ip_addr_reachable() { + solana_logger::setup(); + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); + let port_range = localhost_port_range_for_tests(); + let config = SocketConfiguration::default(); + let (_server_port, (server_udp_socket, server_tcp_listener)) = + bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); + let (_client_port, (client_udp_socket, client_tcp_listener)) = + bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); + + let _runtime = ip_echo_server( + server_tcp_listener, + DEFAULT_IP_ECHO_SERVER_THREADS, + /*shred_version=*/ Some(65535), + ); + + let ip_echo_server_addr = server_udp_socket.local_addr().unwrap(); + assert_eq!( + get_public_ip_addr_with_binding( + &ip_echo_server_addr, + IpAddr::V4(Ipv4Addr::UNSPECIFIED) + ) + .unwrap(), + parse_host("127.0.0.1").unwrap(), + ); + assert_eq!( + get_cluster_shred_version(&ip_echo_server_addr).unwrap(), + 65535 + ); + assert!(verify_all_reachable_tcp( + &ip_echo_server_addr, + vec![client_tcp_listener], + )); + assert!(verify_all_reachable_udp( + &ip_echo_server_addr, + &[&client_udp_socket], + )); + } + + #[test] + fn test_verify_ports_tcp_unreachable() { + solana_logger::setup(); + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); + let port_range = localhost_port_range_for_tests(); + let config = SocketConfiguration::default(); + let (_server_port, (server_udp_socket, _server_tcp_listener)) = + bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); + + // make the socket unreachable by not running the ip echo server! 
+ let server_ip_echo_addr = server_udp_socket.local_addr().unwrap(); + + let (_, (_client_udp_socket, client_tcp_listener)) = + bind_common_in_range_with_config(ip_addr, port_range, config).unwrap(); + + let rt = runtime(); + assert!(!rt.block_on(ip_echo_client::verify_all_reachable_tcp( + server_ip_echo_addr, + vec![client_tcp_listener], + Duration::from_secs(2), + ))); + } + + #[test] + fn test_verify_ports_udp_unreachable() { + solana_logger::setup(); + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); + let port_range = unique_port_range_for_tests(2); + let config = SocketConfiguration::default(); + let (_server_port, (server_udp_socket, _server_tcp_listener)) = + bind_common_in_range_with_config(ip_addr, (port_range.start, port_range.end), config) + .unwrap(); + + // make the socket unreachable by not running the ip echo server! + let server_ip_echo_addr = server_udp_socket.local_addr().unwrap(); + + let (_correct_client_port, (client_udp_socket, _client_tcp_listener)) = + bind_common_in_range_with_config(ip_addr, (port_range.start, port_range.end), config) + .unwrap(); + + let rt = runtime(); + assert!(!rt.block_on(ip_echo_client::verify_all_reachable_udp( + server_ip_echo_addr, + &[&client_udp_socket], + Duration::from_secs(2), + 3, + ))); + } + + #[test] + fn test_verify_many_ports_reachable() { + solana_logger::setup(); + let ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); + let config = SocketConfiguration::default(); + let mut tcp_listeners = vec![]; + let mut udp_sockets = vec![]; + + let port_range = unique_port_range_for_tests(1); + let (_server_port, (_, server_tcp_listener)) = + bind_common_in_range_with_config(ip_addr, (port_range.start, port_range.end), config) + .unwrap(); + for _ in 0..MAX_PORT_VERIFY_THREADS * 2 { + let port_range = unique_port_range_for_tests(1); + let (_client_port, (client_udp_socket, client_tcp_listener)) = + bind_common_in_range_with_config( + ip_addr, + (port_range.start, port_range.end), + config, + ) + .unwrap(); + 
tcp_listeners.push(client_tcp_listener); + udp_sockets.push(client_udp_socket); + } + + let ip_echo_server_addr = server_tcp_listener.local_addr().unwrap(); + + let _runtime = ip_echo_server( + server_tcp_listener, + DEFAULT_IP_ECHO_SERVER_THREADS, + Some(65535), + ); + + assert_eq!( + get_public_ip_addr_with_binding( + &ip_echo_server_addr, + IpAddr::V4(Ipv4Addr::UNSPECIFIED) + ) + .unwrap(), + parse_host("127.0.0.1").unwrap(), + ); + + let socket_refs = udp_sockets.iter().collect_vec(); + assert!(verify_all_reachable_tcp( + &ip_echo_server_addr, + tcp_listeners, + )); + assert!(verify_all_reachable_udp(&ip_echo_server_addr, &socket_refs)); + } + + // This test is gated for non-macOS platforms because it requires binding to 127.0.0.2, + // which is not supported on macOS by default. + #[cfg(not(target_os = "macos"))] + #[test] + fn test_verify_udp_multiple_ips_reachable() { + solana_logger::setup(); + let config = SocketConfiguration::default(); + let ip_a = IpAddr::V4(Ipv4Addr::LOCALHOST); + let ip_b = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)); + + let port_range = localhost_port_range_for_tests(); + + let (_srv_udp_port, (srv_udp_sock, srv_tcp_listener)) = + bind_common_in_range_with_config(ip_a, port_range, config).unwrap(); + + let ip_echo_server_addr = srv_udp_sock.local_addr().unwrap(); + let _runtime = ip_echo_server( + srv_tcp_listener, + DEFAULT_IP_ECHO_SERVER_THREADS, + /*shred_version=*/ Some(42), + ); + + let mut udp_sockets = Vec::new(); + let (_p1, (sock_a, _tl_a)) = + bind_common_in_range_with_config(ip_a, port_range, config).unwrap(); + let (_p2, (sock_b, _tl_b)) = + bind_common_in_range_with_config(ip_b, port_range, config).unwrap(); + + udp_sockets.push(sock_a); + udp_sockets.push(sock_b); + + let socket_refs: Vec<&UdpSocket> = udp_sockets.iter().collect(); + + assert!( + verify_all_reachable_udp(&ip_echo_server_addr, &socket_refs), + "all UDP ports on both 127.0.0.1 and 127.0.0.2 should be reachable" + ); + } } diff --git 
a/net-utils/src/token_bucket.rs b/net-utils/src/token_bucket.rs new file mode 100644 index 00000000000000..cedf3c2379c4ec --- /dev/null +++ b/net-utils/src/token_bucket.rs @@ -0,0 +1,480 @@ +//! This module contains [`TokenBucket`], which provides ability to limit +//! rate of certain events, while allowing bursts through. +//! [`KeyedRateLimiter`] allows to rate-limit multiple keyed items, such +//! as connections. +use { + cfg_if::cfg_if, + dashmap::{mapref::entry::Entry, DashMap}, + solana_svm_type_overrides::sync::atomic::{AtomicU64, AtomicUsize, Ordering}, + std::{borrow::Borrow, cmp::Reverse, hash::Hash, time::Instant}, +}; + +/// Enforces a rate limit on the volume of requests per unit time. +/// +/// Instances update the amount of tokens upon access, and thus does not need to +/// be constantly polled to refill. Uses atomics internally so should be +/// relatively cheap to access from many threads +pub struct TokenBucket { + new_tokens_per_us: f64, + max_tokens: u64, + /// bucket creation + base_time: Instant, + tokens: AtomicU64, + /// time of last update in us since base_time + last_update: AtomicU64, + /// time unused in last token creation round + credit_time_us: AtomicU64, +} + +#[cfg(feature = "shuttle-test")] +static TIME_US: AtomicU64 = AtomicU64::new(0); //used to override Instant::now() + +// If changing this impl, make sure to run benches and ensure they do not panic. +// much of the testing is impossible outside of real multithreading in release mode. 
+impl TokenBucket { + /// Allocate a new TokenBucket + pub fn new(initial_tokens: u64, max_tokens: u64, new_tokens_per_second: f64) -> Self { + assert!( + new_tokens_per_second > 0.0, + "Token bucket can not have zero influx rate" + ); + assert!( + initial_tokens <= max_tokens, + "Can not have more initial tokens than max tokens" + ); + let base_time = Instant::now(); + TokenBucket { + // recompute into us to avoid FP division on every update + new_tokens_per_us: new_tokens_per_second / 1e6, + max_tokens, + tokens: AtomicU64::new(initial_tokens), + last_update: AtomicU64::new(0), + base_time, + credit_time_us: AtomicU64::new(0), + } + } + + /// Return current amount of tokens in the bucket. + /// This may be somewhat inconsistent across threads + /// due to Relaxed atomics. + #[inline] + pub fn current_tokens(&self) -> u64 { + let now = self.time_us(); + self.update_state(now); + self.tokens.load(Ordering::Relaxed) + } + + /// Attempts to consume tokens from bucket. + /// + /// On success, returns Ok(amount of tokens left in the bucket). + /// On failure, returns Err(amount of tokens missing to fill request). + #[inline] + pub fn consume_tokens(&self, request_size: u64) -> Result<u64, u64> { + let now = self.time_us(); + self.update_state(now); + match self.tokens.fetch_update( + Ordering::AcqRel, // winner publishes new amount + Ordering::Acquire, // everyone observed correct number + |tokens| { + if tokens >= request_size { + Some(tokens.saturating_sub(request_size)) + } else { + None + } + }, + ) { + Ok(prev) => Ok(prev.saturating_sub(request_size)), + Err(prev) => Err(request_size.saturating_sub(prev)), + } + } + + /// Retrieves monotonic time since bucket creation. + fn time_us(&self) -> u64 { + cfg_if!
{ + if #[cfg(feature="shuttle-test")] { + TIME_US.load(Ordering::Relaxed) + } else { + let now = Instant::now(); + let elapsed = now.saturating_duration_since(self.base_time); + elapsed.as_micros() as u64 + } + } + } + + /// Updates internal state of the bucket by + /// depositing new tokens (if appropriate) + fn update_state(&self, now: u64) { + // fetch last update time + let last = self.last_update.load(Ordering::SeqCst); + + // If time has not advanced, nothing to do. + if now <= last { + return; + } + + // Try to claim the interval [last, now]. + // If we can not claim it, someone else will claim [last..some other time] when they + // touch the bucket. + // If we can claim interval [last, now], no other thread can credit tokens for it anymore. + // If [last, now] is too short to mint any tokens, spare time will be preserved in credit_time_us. + match self.last_update.compare_exchange( + last, + now, + Ordering::AcqRel, // winner publishes new timestamp + Ordering::Acquire, // loser observes updates + ) { + Ok(_) => { + // This thread won the race and is responsible for minting tokens + let elapsed = now.saturating_sub(last); + + // also add leftovers from previous conversion attempts. + // we do not care about who uses the spare_time_us, so relaxed is ok here. 
+ let elapsed = + elapsed.saturating_add(self.credit_time_us.swap(0, Ordering::Relaxed)); + + let new_tokens_f64 = elapsed as f64 * self.new_tokens_per_us; + + // amount of full tokens to be minted + let new_tokens = new_tokens_f64.floor() as u64; + + let time_to_return = if new_tokens >= 1 { + // Credit tokens, saturating at max_tokens + let _ = self.tokens.fetch_update( + Ordering::AcqRel, // writer publishes new amount + Ordering::Acquire, //we fetch the correct amount + |tokens| Some(tokens.saturating_add(new_tokens).min(self.max_tokens)), + ); + // Fractional remainder of elapsed time (not enough to mint a whole token) + // that will be credited to other minters + (new_tokens_f64.fract() / self.new_tokens_per_us) as u64 + } else { + // No whole tokens minted → return whole interval + elapsed + }; + // Save unused elapsed time for other threads + self.credit_time_us + .fetch_add(time_to_return, Ordering::Relaxed); + } + Err(_) => { + // Another thread advanced last_update first → nothing we can do now. + } + } + } +} + +impl Clone for TokenBucket { + /// Clones the TokenBucket with approximate state + /// of the original. While this will never return an object in an + /// invalid state, using this in a contended environment is not recommended. + fn clone(&self) -> Self { + Self { + new_tokens_per_us: self.new_tokens_per_us, + max_tokens: self.max_tokens, + base_time: self.base_time, + tokens: AtomicU64::new(self.tokens.load(Ordering::Relaxed)), + last_update: AtomicU64::new(self.last_update.load(Ordering::Relaxed)), + credit_time_us: AtomicU64::new(self.credit_time_us.load(Ordering::Relaxed)), + } + } +} + +/// Provides rate limiting for multiple contexts at the same time +/// +/// This can use e.g. IP address as a Key. +/// Internally this is a [DashMap] of [TokenBucket] instances +/// that are created on demand using a prototype [TokenBucket] +/// to copy initial state from. 
+/// Uses LazyLru logic under the hood to keep the amount of items +/// under control. +pub struct KeyedRateLimiter<K> +where + K: Hash + Eq, +{ + data: DashMap<K, TokenBucket>, + target_capacity: usize, + prototype_bucket: TokenBucket, + countdown_to_shrink: AtomicUsize, + approx_len: AtomicUsize, + shrink_interval: usize, +} + +impl<K> KeyedRateLimiter<K> +where + K: Hash + Eq, +{ + /// Creates a new KeyedRateLimiter with a specified target capacity and shard amount for the + /// underlying DashMap. This uses a LazyLRU style eviction policy, so actual memory consumption + /// will be 2 * target_capacity. + /// + /// shard_amount should be greater than 0 and be a power of two. + /// If a shard_amount which is not a power of two is provided, the function will panic. + #[allow(clippy::arithmetic_side_effects)] + pub fn new(target_capacity: usize, prototype_bucket: TokenBucket, shard_amount: usize) -> Self { + let shrink_interval = target_capacity / 4; + Self { + data: DashMap::with_capacity_and_shard_amount(target_capacity * 2, shard_amount), + target_capacity, + prototype_bucket, + countdown_to_shrink: AtomicUsize::new(shrink_interval), + approx_len: AtomicUsize::new(0), + shrink_interval, + } + } + + /// Fetches amount of tokens available for key. + /// + /// Returns None if no bucket exists for the key provided + #[inline] + pub fn current_tokens(&self, key: impl Borrow<K>) -> Option<u64> { + let bucket = self.data.get(key.borrow())?; + Some(bucket.current_tokens()) + } + + /// Consumes request_size tokens from a bucket at given key. + /// + /// On success, returns Ok(amount of tokens left in the bucket) + /// On failure, returns Err(amount of tokens missing to fill request) + /// If no bucket exists at key, a new bucket will be allocated, and normal policy will be applied to it + /// Outdated buckets may be evicted on an LRU basis.
+ pub fn consume_tokens(&self, key: K, request_size: u64) -> Result<u64, u64> { + let (entry_added, res) = { + let bucket = self.data.entry(key); + match bucket { + Entry::Occupied(entry) => (false, entry.get().consume_tokens(request_size)), + Entry::Vacant(entry) => { + // if the key is not in the LRU, we need to allocate a new bucket + let bucket = self.prototype_bucket.clone(); + let res = bucket.consume_tokens(request_size); + entry.insert(bucket); + (true, res) + } + } + }; + + if entry_added { + if let Ok(count) = + self.countdown_to_shrink + .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| { + if v == 0 { + // reset the countdown to starting position + // thus preventing other threads from racing for locks + None + } else { + Some(v.saturating_sub(1)) + } + }) + { + if count == 1 { + // the last "previous" value we will see before counter reaches zero + self.maybe_shrink(); + self.countdown_to_shrink + .store(self.shrink_interval, Ordering::Relaxed); + } + } else { + self.approx_len.fetch_add(1, Ordering::Relaxed); + } + } + res + } + + /// Returns approximate amount of entries in the datastructure. + /// Should be within ~10% of the true amount. + #[inline] + pub fn len_approx(&self) -> usize { + self.approx_len.load(Ordering::Relaxed) + } + + // apply lazy-LRU eviction policy to each DashMap shard.
+ // Allowing side-effects here since overflows here are not + // actually possible + #[allow(clippy::arithmetic_side_effects)] + fn maybe_shrink(&self) { + let mut actual_len = 0; + let target_shard_size = self.target_capacity / self.data.shards().len(); + let mut entries = Vec::with_capacity(target_shard_size * 2); + for shardlock in self.data.shards() { + let mut shard = shardlock.write(); + + if shard.len() <= target_shard_size * 3 / 2 { + actual_len += shard.len(); + continue; + } + entries.clear(); + entries.extend( + shard.drain().map(|(key, value)| { + (key, value.get().last_update.load(Ordering::SeqCst), value) + }), + ); + + entries.select_nth_unstable_by_key(target_shard_size, |(_, last_update, _)| { + Reverse(*last_update) + }); + + shard.extend( + entries + .drain(..) + .take(target_shard_size) + .map(|(key, _last_update, value)| (key, value)), + ); + debug_assert!(shard.len() <= target_shard_size); + actual_len += shard.len(); + } + self.approx_len.store(actual_len, Ordering::Relaxed); + } + + /// Set the auto-shrink interval. Set to 0 to disable shrinking. + /// During writes we want to check for length, but not too often + /// to reduce probability of lock contention, so keeping this + /// large is good for perf (at cost of memory use) + pub fn set_shrink_interval(&mut self, interval: usize) { + self.shrink_interval = interval; + } + + /// Get the auto-shrink interval. 
+ pub fn shrink_interval(&self) -> usize { + self.shrink_interval + } +} + +#[cfg(test)] +pub mod test { + use { + super::*, + solana_svm_type_overrides::thread, + std::{ + net::{IpAddr, Ipv4Addr}, + time::Duration, + }, + }; + + #[test] + fn test_token_bucket() { + let tb = TokenBucket::new(100, 100, 1000.0); + assert_eq!(tb.current_tokens(), 100); + tb.consume_tokens(50).expect("Bucket is initially full"); + tb.consume_tokens(50) + .expect("We should still have >50 tokens left"); + tb.consume_tokens(50) + .expect_err("There should not be enough tokens now"); + thread::sleep(Duration::from_millis(50)); + assert!( + tb.current_tokens() > 40, + "We should be refilling at ~1 token per millisecond" + ); + assert!( + tb.current_tokens() < 70, + "We should be refilling at ~1 token per millisecond" + ); + tb.consume_tokens(40) + .expect("Bucket should have enough for another request now"); + thread::sleep(Duration::from_millis(120)); + assert_eq!(tb.current_tokens(), 100, "Bucket should not overfill"); + } + #[test] + fn test_keyed_rate_limiter() { + let prototype_bucket = TokenBucket::new(100, 100, 1000.0); + let rl = KeyedRateLimiter::new(8, prototype_bucket, 2); + let ip1 = IpAddr::V4(Ipv4Addr::from_bits(1234)); + let ip2 = IpAddr::V4(Ipv4Addr::from_bits(4321)); + assert_eq!(rl.current_tokens(ip1), None, "Initially no buckets exist"); + rl.consume_tokens(ip1, 50) + .expect("Bucket is initially full"); + rl.consume_tokens(ip1, 50) + .expect("We should still have >50 tokens left"); + rl.consume_tokens(ip1, 50) + .expect_err("There should not be enough tokens now"); + rl.consume_tokens(ip2, 50) + .expect("Bucket is initially full"); + rl.consume_tokens(ip2, 50) + .expect("We should still have >50 tokens left"); + rl.consume_tokens(ip2, 50) + .expect_err("There should not be enough tokens now"); + std::thread::sleep(Duration::from_millis(50)); + assert!( + rl.current_tokens(ip1).unwrap() > 40, + "We should be refilling at ~1 token per millisecond" + ); + assert!( + 
rl.current_tokens(ip1).unwrap() < 70, + "We should be refilling at ~1 token per millisecond" + ); + rl.consume_tokens(ip1, 40) + .expect("Bucket should have enough for another request now"); + thread::sleep(Duration::from_millis(120)); + assert_eq!( + rl.current_tokens(ip1), + Some(100), + "Bucket should not overfill" + ); + assert_eq!( + rl.current_tokens(ip2), + Some(100), + "Bucket should not overfill" + ); + + rl.consume_tokens(ip2, 100).expect("Bucket should be full"); + // go several times over the capacity of the TB to make sure old record + // is erased no matter in which bucket it lands + for ip in 0..64 { + let ip = IpAddr::V4(Ipv4Addr::from_bits(ip)); + rl.consume_tokens(ip, 50).unwrap(); + } + assert_eq!( + rl.current_tokens(ip1), + None, + "Very old record should have been erased" + ); + rl.consume_tokens(ip2, 100) + .expect("New bucket should have been made for ip2"); + } + + #[cfg(feature = "shuttle-test")] + #[test] + fn shuttle_test_token_bucket_race() { + use shuttle::sync::atomic::AtomicBool; + shuttle::check_random( + || { + TIME_US.store(0, Ordering::SeqCst); + let test_duration_us = 2500; + let run: &AtomicBool = Box::leak(Box::new(AtomicBool::new(true))); + let tb: &TokenBucket = Box::leak(Box::new(TokenBucket::new(10, 20, 5000.0))); + + // time advancement thread + let time_advancer = thread::spawn(move || { + let mut current_time = 0; + while current_time < test_duration_us && run.load(Ordering::SeqCst) { + let increment = 100; // microseconds + current_time += increment; + TIME_US.store(current_time, Ordering::SeqCst); + shuttle::thread::yield_now(); + } + run.store(false, Ordering::SeqCst); + }); + + let threads: Vec<_> = (0..2) + .map(|_| { + thread::spawn(move || { + let mut total = 0; + while run.load(Ordering::SeqCst) { + if tb.consume_tokens(5).is_ok() { + total += 1; + } + shuttle::thread::yield_now(); + } + total + }) + }) + .collect(); + + time_advancer.join().unwrap(); + let received = threads.into_iter().map(|t| 
t.join().unwrap()).sum(); + + // Initial tokens: 10, refill rate: 5000 tokens/sec (5 tokens/ms) + // In 2ms: 10 + (5 * 2) = 20 tokens total + // Each consumption: 5 tokens → 4 total consumptions expected + assert_eq!(4, received); + }, + 100, + ); + } +} diff --git a/net/net.sh b/net/net.sh index a64b7b2d913c0c..bb5e49db7216da 100755 --- a/net/net.sh +++ b/net/net.sh @@ -134,12 +134,6 @@ Operate a configured testnet logs-specific options: none - netem-specific options: - --config - Netem configuration (as a double quoted string) - --parition - Percentage of network that should be configured with netem - --config-file - Configuration file for partition and netem configuration - --netem-cmd - Optional command argument to netem. Default is "add". Use "cleanup" to remove rules. - update-specific options: --platform linux|osx|windows - Deploy the tarball using 'agave-install deploy ...' for the given platform (multiple platforms may be specified) @@ -838,10 +832,6 @@ debugBuild=false profileBuild=false doBuild=true gpuMode=auto -netemPartition="" -netemConfig="" -netemConfigFile="" -netemCommand="add" clientDelayStart=0 netLogDir= maybeWarpSlot= @@ -934,18 +924,6 @@ while [[ -n $1 ]]; do elif [[ $1 = --profile ]]; then profileBuild=true shift 1 - elif [[ $1 = --partition ]]; then - netemPartition=$2 - shift 2 - elif [[ $1 = --config ]]; then - netemConfig=$2 - shift 2 - elif [[ $1 == --config-file ]]; then - netemConfigFile=$2 - shift 2 - elif [[ $1 == --netem-cmd ]]; then - netemCommand=$2 - shift 2 elif [[ $1 = --gpu-mode ]]; then gpuMode=$2 case "$gpuMode" in @@ -1223,40 +1201,6 @@ logs) fetchRemoteLog "$ipAddress" validator done ;; -netem) - if [[ -n $netemConfigFile ]]; then - remoteNetemConfigFile="$(basename "$netemConfigFile")" - if [[ $netemCommand = "add" ]]; then - for ipAddress in "${validatorIpList[@]}"; do - remoteHome=$(remoteHomeDir "$ipAddress") - remoteSolanaHome="${remoteHome}/solana" - "$here"/scp.sh "$netemConfigFile" 
solana@"$ipAddress":"$remoteSolanaHome" - done - fi - for i in "${!validatorIpList[@]}"; do - "$here"/ssh.sh solana@"${validatorIpList[$i]}" 'solana/scripts/net-shaper.sh' \ - "$netemCommand" ~solana/solana/"$remoteNetemConfigFile" "${#validatorIpList[@]}" "$i" - done - else - num_nodes=$((${#validatorIpList[@]}*netemPartition/100)) - if [[ $((${#validatorIpList[@]}*netemPartition%100)) -gt 0 ]]; then - num_nodes=$((num_nodes+1)) - fi - if [[ "$num_nodes" -gt "${#validatorIpList[@]}" ]]; then - num_nodes=${#validatorIpList[@]} - fi - - # Stop netem on all nodes - for ipAddress in "${validatorIpList[@]}"; do - "$here"/ssh.sh solana@"$ipAddress" 'solana/scripts/netem.sh delete < solana/netem.cfg || true' - done - - # Start netem on required nodes - for ((i=0; i solana/netem.cfg; solana/scripts/netem.sh add \"$netemConfig\"" - done - fi - ;; *) echo "Internal error: Unknown command: $command" usage diff --git a/net/remote/cleanup.sh b/net/remote/cleanup.sh index 8252c840a87cbc..8c4eae0ff58358 100755 --- a/net/remote/cleanup.sh +++ b/net/remote/cleanup.sh @@ -14,11 +14,6 @@ for pid in solana/*.pid; do $sudo kill -- -"$pgid" fi done -if [[ -f solana/netem.cfg ]]; then - solana/scripts/netem.sh delete < solana/netem.cfg - rm -f solana/netem.cfg -fi -solana/scripts/net-shaper.sh cleanup for pattern in validator.sh boostrap-leader.sh solana- remote- iftop validator client node; do echo "killing $pattern" pkill -f $pattern diff --git a/notifier/src/lib.rs b/notifier/src/lib.rs index 23a66e2d5badb3..41beede3bd2052 100644 --- a/notifier/src/lib.rs +++ b/notifier/src/lib.rs @@ -112,7 +112,7 @@ impl Default for Notifier { impl Notifier { pub fn new(env_prefix: &str) -> Self { - info!("Initializing {}Notifier", env_prefix); + info!("Initializing {env_prefix}Notifier"); let mut notifiers = vec![]; @@ -143,10 +143,9 @@ impl Notifier { if let Ok(log_level) = env::var(format!("{env_prefix}LOG_NOTIFIER_LEVEL")) { match Level::from_str(&log_level) { Ok(level) => 
notifiers.push(NotificationChannel::Log(level)), - Err(e) => warn!( - "could not parse specified log notifier level string ({}): {}", - log_level, e - ), + Err(e) => { + warn!("could not parse specified log notifier level string ({log_level}): {e}") + } } } @@ -170,14 +169,14 @@ impl Notifier { // Discord rate limiting is aggressive, limit to 1 message a second sleep(Duration::from_millis(1000)); - info!("Sending {}", line); + info!("Sending {line}"); let data = json!({ "content": line }); loop { let response = self.client.post(webhook).json(&data).send(); if let Err(err) = response { - warn!("Failed to send Discord message: \"{}\": {:?}", line, err); + warn!("Failed to send Discord message: \"{line}\": {err:?}"); break; } else if let Ok(response) = response { info!("response status: {}", response.status()); @@ -195,7 +194,7 @@ impl Notifier { NotificationChannel::Slack(webhook) => { let data = json!({ "text": msg }); if let Err(err) = self.client.post(webhook).json(&data).send() { - warn!("Failed to send Slack message: {:?}", err); + warn!("Failed to send Slack message: {err:?}"); } } NotificationChannel::PagerDuty(routing_key) => { @@ -212,7 +211,7 @@ impl Notifier { let url = "https://events.pagerduty.com/v2/enqueue"; if let Err(err) = self.client.post(url).json(&data).send() { - warn!("Failed to send PagerDuty alert: {:?}", err); + warn!("Failed to send PagerDuty alert: {err:?}"); } } @@ -221,7 +220,7 @@ impl Notifier { let url = format!("https://api.telegram.org/bot{bot_token}/sendMessage"); if let Err(err) = self.client.post(url).json(&data).send() { - warn!("Failed to send Telegram message: {:?}", err); + warn!("Failed to send Telegram message: {err:?}"); } } @@ -236,11 +235,11 @@ impl Notifier { ); let params = [("To", to), ("From", from), ("Body", &msg.to_string())]; if let Err(err) = self.client.post(url).form(¶ms).send() { - warn!("Failed to send Twilio message: {:?}", err); + warn!("Failed to send Twilio message: {err:?}"); } } 
NotificationChannel::Log(level) => { - log!(*level, "{}", msg) + log!(*level, "{msg}") } } } diff --git a/perf/benches/sigverify.rs b/perf/benches/sigverify.rs index fb9491f3460bb3..66e3a9a985acc3 100644 --- a/perf/benches/sigverify.rs +++ b/perf/benches/sigverify.rs @@ -149,7 +149,7 @@ fn bench_sigverify_uneven(b: &mut Bencher) { } batches.push(PacketBatch::from(batch)); } - info!("num_packets: {} valid: {}", num_packets, num_valid); + info!("num_packets: {num_packets} valid: {num_valid}"); let recycler = Recycler::default(); let recycler_out = Recycler::default(); diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 80a7f7e80dc862..118a26f6802ecb 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -1387,7 +1387,7 @@ mod tests { solana_logger::setup(); let mut rng = rand::thread_rng(); - // tansfer tx is not + // transfer tx is not { let mut tx = test_tx(); tx.message.instructions[0].data = vec![1, 2, 3]; diff --git a/perf/src/thread.rs b/perf/src/thread.rs index 7a101390ee5024..db3c08c4d44681 100644 --- a/perf/src/thread.rs +++ b/perf/src/thread.rs @@ -58,7 +58,7 @@ pub fn is_renice_allowed(adjustment: i8) -> bool { } else { nix::unistd::geteuid().is_root() || caps::has_cap(None, CapSet::Effective, Capability::CAP_SYS_NICE) - .map_err(|err| warn!("Failed to get thread's capabilities: {}", err)) + .map_err(|err| warn!("Failed to get thread's capabilities: {err}")) .unwrap_or(false) } } diff --git a/platform-tools-sdk/cargo-build-sbf/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/Cargo.toml index 44fe1b1cecc153..11811a7c14d3a7 100644 --- a/platform-tools-sdk/cargo-build-sbf/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/Cargo.toml @@ -23,11 +23,11 @@ clap = { version = "3.1.5", features = ["cargo", "env"] } itertools = { workspace = true } log = { workspace = true, features = ["std"] } regex = { workspace = true } -reqwest = { workspace = true, features = ["blocking", "rustls-tls"] } +reqwest = { workspace = true, features = 
["blocking", "rustls-tls", "rustls-tls-native-roots" ] } semver = { workspace = true } -solana-file-download = "=2.2.2" -solana-keypair = "=2.2.1" -solana-logger = "=2.3.1" +solana-file-download = "=3.0.0" +solana-keypair = "=3.0.1" +solana-logger = "=3.0.0" tar = { workspace = true } [dev-dependencies] diff --git a/platform-tools-sdk/cargo-build-sbf/src/main.rs b/platform-tools-sdk/cargo-build-sbf/src/main.rs index 7e67d6841eea06..ae96a5d52cad77 100644 --- a/platform-tools-sdk/cargo-build-sbf/src/main.rs +++ b/platform-tools-sdk/cargo-build-sbf/src/main.rs @@ -7,9 +7,9 @@ use { post_processing::post_process, toolchain::{ corrupted_toolchain, generate_toolchain_name, get_base_rust_version, install_tools, - DEFAULT_PLATFORM_TOOLS_VERSION, + rust_target_triple, DEFAULT_PLATFORM_TOOLS_VERSION, }, - utils::{rust_target_triple, spawn}, + utils::spawn, }, cargo_metadata::camino::Utf8PathBuf, clap::{crate_description, crate_name, crate_version, Arg}, @@ -86,7 +86,7 @@ impl Default for Config<'_> { } pub fn is_version_string(arg: &str) -> Result<(), String> { - let semver_re = Regex::new(r"^v?[0-9]+\.[0-9]+(\.[0-9]+)?").unwrap(); + let semver_re = Regex::new(r"^v?[0-9]+\.[0-9]+(\.[0-9]+)?$").unwrap(); if semver_re.is_match(arg) { return Ok(()); } @@ -120,22 +120,6 @@ fn home_dir() -> PathBuf { ) } -fn semver_version(version: &str) -> String { - let starts_with_v = version.starts_with('v'); - let dots = version.as_bytes().iter().fold( - 0, - |n: u32, c| if *c == b'.' 
{ n.saturating_add(1) } else { n }, - ); - match (dots, starts_with_v) { - (0, false) => format!("{version}.0.0"), - (0, true) => format!("{}.0.0", &version[1..]), - (1, false) => format!("{version}.0"), - (1, true) => format!("{}.0", &version[1..]), - (_, false) => version.to_string(), - (_, true) => version[1..].to_string(), - } -} - fn prepare_environment( config: &Config, package: Option<&cargo_metadata::Package>, @@ -639,3 +623,50 @@ fn main() { } build_solana(config, manifest_path); } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_is_version_string_valid_versions() { + // Test valid versions that should pass validation + assert!(is_version_string("1.2.3").is_ok()); + assert!(is_version_string("v2.1.0").is_ok()); + assert!(is_version_string("1.32").is_ok()); + assert!(is_version_string("v1.32").is_ok()); + assert!(is_version_string("0.1").is_ok()); + assert!(is_version_string("v0.1").is_ok()); + assert!(is_version_string("10.20.30").is_ok()); + assert!(is_version_string("v10.20.30").is_ok()); + } + + #[test] + fn test_is_version_string_invalid_versions() { + // Test invalid versions that should fail validation + assert!(is_version_string("1.2.3abc").is_err()); + assert!(is_version_string("v2.1.0-extra").is_err()); + assert!(is_version_string("abc1.2.3").is_err()); + assert!(is_version_string("1").is_err()); + assert!(is_version_string("v1").is_err()); + assert!(is_version_string("1.2.3.4.5").is_err()); + assert!(is_version_string("").is_err()); + assert!(is_version_string("v").is_err()); + assert!(is_version_string("1.").is_err()); + assert!(is_version_string("v1.").is_err()); + assert!(is_version_string(".1.2").is_err()); + assert!(is_version_string("1.2.3-beta").is_err()); + assert!(is_version_string("v1.2.3+build").is_err()); + } + + #[test] + fn test_is_version_string_error_message() { + // Test that error message is descriptive + let result = is_version_string("invalid"); + assert!(result.is_err()); + let error_msg = 
result.unwrap_err(); + assert!(error_msg.contains("version string may start with 'v'")); + assert!(error_msg.contains("major and minor version numbers")); + assert!(error_msg.contains("separated by a dot")); + } +} diff --git a/platform-tools-sdk/cargo-build-sbf/src/post_processing.rs b/platform-tools-sdk/cargo-build-sbf/src/post_processing.rs index 170d808805dd84..0c5933f42a492a 100644 --- a/platform-tools-sdk/cargo-build-sbf/src/post_processing.rs +++ b/platform-tools-sdk/cargo-build-sbf/src/post_processing.rs @@ -1,5 +1,5 @@ use { - crate::{spawn, utils::rust_target_triple, Config}, + crate::{spawn, toolchain::rust_target_triple, Config}, log::{debug, error, info, warn}, regex::Regex, solana_keypair::{write_keypair_file, Keypair}, diff --git a/platform-tools-sdk/cargo-build-sbf/src/toolchain.rs b/platform-tools-sdk/cargo-build-sbf/src/toolchain.rs index db9e8a4da4c347..9135485bac8dcc 100644 --- a/platform-tools-sdk/cargo-build-sbf/src/toolchain.rs +++ b/platform-tools-sdk/cargo-build-sbf/src/toolchain.rs @@ -1,9 +1,5 @@ use { - crate::{ - home_dir, semver_version, - utils::{rust_target_triple, spawn}, - Config, - }, + crate::{home_dir, utils::spawn, Config}, bzip2::bufread::BzDecoder, log::{debug, error, warn}, regex::Regex, @@ -18,7 +14,7 @@ use { tar::Archive, }; -pub(crate) const DEFAULT_PLATFORM_TOOLS_VERSION: &str = "v1.50"; +pub(crate) const DEFAULT_PLATFORM_TOOLS_VERSION: &str = "v1.51"; pub(crate) const DEFAULT_RUST_VERSION: &str = "1.84.1"; fn find_installed_platform_tools() -> Vec { @@ -58,6 +54,22 @@ fn downloadable_version(version: &str) -> String { } } +fn semver_version(version: &str) -> String { + let starts_with_v = version.starts_with('v'); + let dots = version.as_bytes().iter().fold( + 0, + |n: u32, c| if *c == b'.' 
{ n.saturating_add(1) } else { n }, + ); + match (dots, starts_with_v) { + (0, false) => format!("{version}.0.0"), + (0, true) => format!("{}.0.0", &version[1..]), + (1, false) => format!("{version}.0"), + (1, true) => format!("{}.0", &version[1..]), + (_, false) => version.to_string(), + (_, true) => version[1..].to_string(), + } +} + fn validate_platform_tools_version(requested_version: &str, builtin_version: &str) -> String { // Early return here in case it's the first time we're running `cargo build-sbf` // and we need to create the cache folders @@ -428,3 +440,21 @@ fn check_solana_target_installed(target: &str) { exit(1); } } + +pub(crate) fn rust_target_triple(config: &Config) -> String { + let tools_version = semver::Version::parse(&semver_version( + config + .platform_tools_version + .unwrap_or(DEFAULT_PLATFORM_TOOLS_VERSION), + )) + .unwrap(); + let sbpf_minimum_version = semver::Version::parse(&semver_version("v1.44")).unwrap(); + + if config.arch == "v0" && tools_version < sbpf_minimum_version { + "sbf-solana-solana".to_string() + } else if config.arch == "v0" { + "sbpf-solana-solana".to_string() + } else { + format!("sbpf{}-solana-solana", config.arch) + } +} diff --git a/platform-tools-sdk/cargo-build-sbf/src/utils.rs b/platform-tools-sdk/cargo-build-sbf/src/utils.rs index 598e5bf31f17e3..b09a0b6739b57b 100644 --- a/platform-tools-sdk/cargo-build-sbf/src/utils.rs +++ b/platform-tools-sdk/cargo-build-sbf/src/utils.rs @@ -1,5 +1,4 @@ use { - crate::Config, itertools::Itertools, log::{error, info}, std::{ @@ -61,11 +60,3 @@ where .map(|&c| c as char) .collect::() } - -pub(crate) fn rust_target_triple(config: &Config) -> String { - if config.arch == "v0" { - "sbpf-solana-solana".to_string() - } else { - format!("sbpf{}-solana-solana", config.arch) - } -} diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates.rs b/platform-tools-sdk/cargo-build-sbf/tests/crates.rs index 2c0ae686ac4fed..46a0be6af790ef 100644 --- 
a/platform-tools-sdk/cargo-build-sbf/tests/crates.rs +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates.rs @@ -1,18 +1,56 @@ use { assert_cmd::assert::Assert, predicates::prelude::*, - std::{ - env, fs, - path::PathBuf, - str::FromStr, - sync::atomic::{AtomicBool, Ordering}, - }, + std::{env, fs, path::PathBuf, str::FromStr}, }; #[macro_use] extern crate serial_test; -static SBF_TOOLS_INSTALL: AtomicBool = AtomicBool::new(true); +fn should_install_tools() -> bool { + let tools_path = env::var("HOME").unwrap(); + let toolchain_path = PathBuf::from(tools_path) + .join(".cache") + .join("solana") + .join("v1.50") + .join("platform-tools"); + + let rust_path = toolchain_path.join("rust"); + let llvm_path = toolchain_path.join("llvm"); + let binaries = rust_path.join("bin"); + + let rustc = binaries.join(if cfg!(windows) { "rustc.exe" } else { "rustc" }); + let cargo = binaries.join(if cfg!(windows) { "cargo.exe" } else { "cargo" }); + + if !toolchain_path.try_exists().unwrap_or(false) + || !rust_path.try_exists().unwrap_or(false) + || !llvm_path.try_exists().unwrap_or(false) + || !binaries.try_exists().unwrap_or(false) + || !rustc.try_exists().unwrap_or(false) + || !cargo.try_exists().unwrap_or(false) + { + return true; + } + + let Ok(folder_metadata) = fs::metadata(rust_path) else { + return true; + }; + let Ok(creation_time) = folder_metadata.created() else { + return true; + }; + + let now = std::time::SystemTime::now(); + let Ok(elapsed_time) = now.duration_since(creation_time) else { + return true; + }; + + if elapsed_time.as_secs() > 300 { + return true; + } + + false +} + fn run_cargo_build(crate_name: &str, extra_args: &[&str], fail: bool) { let cwd = env::current_dir().expect("Unable to get current working directory"); let toml = cwd @@ -22,7 +60,7 @@ fn run_cargo_build(crate_name: &str, extra_args: &[&str], fail: bool) { .join("Cargo.toml"); let toml = format!("{}", toml.display()); let mut args = vec!["-v", "--sbf-sdk", "../sbf", "--manifest-path", 
&toml]; - if SBF_TOOLS_INSTALL.fetch_and(false, Ordering::SeqCst) { + if should_install_tools() { args.push("--force-tools-install"); } for arg in extra_args { diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml index a038f50da83b19..3f8d630c17a98b 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fail" -version = "3.0.0" +version = "3.1.0" description = "Solana SBF test program written in Rust" authors = ["Anza Maintainers "] repository = "https://github.com/anza-xyz/agave" diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml index 9e05e35e6af7d8..3c25e285b5d977 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "noop" -version = "3.0.0" +version = "3.1.0" description = "Solana SBF test program written in Rust" authors = ["Anza Maintainers "] repository = "https://github.com/anza-xyz/agave" diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml index 0828afd359038e..383c5e11e416bb 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/package-metadata/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "package-metadata" -version = "3.0.0" +version = "3.1.0" description = "Solana SBF test program with tools version in package metadata" authors = ["Anza Maintainers "] repository = "https://github.com/anza-xyz/agave" @@ -10,7 +10,7 @@ edition = "2021" publish = false [package.metadata.solana] -tools-version = "v1.50" +tools-version = "v1.51" 
program-id = "MyProgram1111111111111111111111111111111111" [dependencies] diff --git a/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml b/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml index 538d9c11f040e6..8bc6af67b2783d 100644 --- a/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml +++ b/platform-tools-sdk/cargo-build-sbf/tests/crates/workspace-metadata/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "workspace-metadata" -version = "3.0.0" +version = "3.1.0" description = "Solana SBF test program with tools version in workspace metadata" authors = ["Anza Maintainers "] repository = "https://github.com/anza-xyz/agave" @@ -27,4 +27,4 @@ check-cfg = [ [workspace] [workspace.metadata.solana] -tools-version = "v1.50" +tools-version = "v1.51" diff --git a/platform-tools-sdk/cargo-test-sbf/Cargo.toml b/platform-tools-sdk/cargo-test-sbf/Cargo.toml index fa0fb2fe2cbc7c..cf1583c9066027 100644 --- a/platform-tools-sdk/cargo-test-sbf/Cargo.toml +++ b/platform-tools-sdk/cargo-test-sbf/Cargo.toml @@ -19,4 +19,4 @@ clap = { version = "3.1.5", features = ["cargo"] } itertools = { workspace = true } log = { workspace = true, features = ["std"] } regex = { workspace = true } -solana-logger = "=2.3.1" +solana-logger = "=3.0.0" diff --git a/platform-tools-sdk/sbf/scripts/install.sh b/platform-tools-sdk/sbf/scripts/install.sh index 8f6df516cbedff..d7386b070a558c 100755 --- a/platform-tools-sdk/sbf/scripts/install.sh +++ b/platform-tools-sdk/sbf/scripts/install.sh @@ -109,7 +109,7 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then fi # Install platform tools -tools_version=v1.50 +tools_version=v1.51 rust_version=1.84.1 if [[ ! -e platform-tools-$tools_version.md || ! 
-e platform-tools ]]; then ( diff --git a/poh/benches/poh.rs b/poh/benches/poh.rs index 93f21c2bf0ee56..10fac75b75e9c8 100644 --- a/poh/benches/poh.rs +++ b/poh/benches/poh.rs @@ -125,6 +125,7 @@ fn bench_poh_recorder_record_transaction_index(bencher: &mut Bencher) { vec![test::black_box(txs.clone())], ) .unwrap() + .starting_transaction_index .unwrap(); }); poh_recorder.tick(); diff --git a/poh/benches/transaction_recorder.rs b/poh/benches/transaction_recorder.rs index 33e3ff876978b3..098392ce8af723 100644 --- a/poh/benches/transaction_recorder.rs +++ b/poh/benches/transaction_recorder.rs @@ -7,13 +7,14 @@ use { get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, }, solana_poh::{ + poh_controller::PohController, poh_recorder::PohRecorder, poh_service::{PohService, DEFAULT_HASHES_PER_BATCH, DEFAULT_PINNED_CPU_CORE}, transaction_recorder::TransactionRecorder, }, solana_poh_config::PohConfig, solana_pubkey::Pubkey, - solana_runtime::{bank::Bank, installed_scheduler_pool::BankWithScheduler}, + solana_runtime::bank::Bank, solana_transaction::versioned::VersionedTransaction, std::{ sync::{atomic::AtomicBool, Arc, RwLock}, @@ -57,7 +58,7 @@ fn bench_record_transactions(c: &mut Criterion) { &genesis_config_info.genesis_config.poh_config, exit.clone(), ); - poh_recorder.set_bank(BankWithScheduler::new_without_scheduler(bank.clone())); + poh_recorder.set_bank_for_test(bank.clone()); let (record_sender, record_receiver) = crossbeam_channel::unbounded(); let transaction_recorder = TransactionRecorder::new(record_sender, exit.clone()); @@ -74,6 +75,7 @@ fn bench_record_transactions(c: &mut Criterion) { .collect(); let poh_recorder = Arc::new(RwLock::new(poh_recorder)); + let (_poh_controller, poh_service_message_receiver) = PohController::new(); let poh_service = PohService::new( poh_recorder.clone(), &genesis_config_info.genesis_config.poh_config, @@ -82,6 +84,7 @@ fn bench_record_transactions(c: &mut Criterion) { DEFAULT_PINNED_CPU_CORE, 
DEFAULT_HASHES_PER_BATCH, record_receiver, + poh_service_message_receiver, ); let mut group = c.benchmark_group("record_transactions"); @@ -103,7 +106,7 @@ fn bench_record_transactions(c: &mut Criterion) { poh_recorder .write() .unwrap() - .set_bank(BankWithScheduler::new_without_scheduler(bank.clone())); + .set_bank_for_test(bank.clone()); let start = Instant::now(); for txs in tx_batches { diff --git a/poh/src/lib.rs b/poh/src/lib.rs index 0180b04bc835a5..103e945cd3ac48 100644 --- a/poh/src/lib.rs +++ b/poh/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::arithmetic_side_effects)] +pub mod poh_controller; pub mod poh_recorder; pub mod poh_service; pub mod transaction_recorder; diff --git a/poh/src/poh_controller.rs b/poh/src/poh_controller.rs new file mode 100644 index 00000000000000..eafcddf5a08d06 --- /dev/null +++ b/poh/src/poh_controller.rs @@ -0,0 +1,151 @@ +use { + crossbeam_channel::{Receiver, SendError, Sender, TryRecvError}, + solana_clock::Slot, + solana_runtime::{bank::Bank, installed_scheduler_pool::BankWithScheduler}, + std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; + +pub enum PohServiceMessage { + Reset { + reset_bank: Arc, + next_leader_slot: Option<(Slot, Slot)>, + }, + SetBank { + bank: BankWithScheduler, + }, +} + +/// Handle to control the Bank/slot that the PoH service is operating on. +pub struct PohController { + sender: Sender, + /// Used to indicate if there are any pending messages in the channel + /// OR that the receiver is currently processing. + /// This is necessary because crossbeam does not support peeking the + /// channel. + pending_message: Arc, +} + +impl PohController { + pub fn new() -> (Self, PohServiceMessageReceiver) { + const CHANNEL_SIZE: usize = 16; // small size, we should never hit this. 
+ let (sender, receiver) = crossbeam_channel::bounded(CHANNEL_SIZE); + let pending_message = Arc::new(AtomicUsize::new(0)); + let receiver = PohServiceMessageReceiver { + receiver, + pending_message: pending_message.clone(), + }; + ( + Self { + sender, + pending_message, + }, + receiver, + ) + } + + pub fn has_pending_message(&self) -> bool { + self.pending_message.load(Ordering::Acquire) > 0 + } + + /// Signal to PoH to use a new bank. + pub fn set_bank_sync( + &mut self, + bank: BankWithScheduler, + ) -> Result<(), SendError> { + self.send_and_wait_on_pending_message(PohServiceMessage::SetBank { bank }) + } + + pub fn set_bank( + &mut self, + bank: BankWithScheduler, + ) -> Result<(), SendError> { + self.send_message(PohServiceMessage::SetBank { bank }) + } + + /// Signal to reset PoH to specified bank. + pub fn reset_sync( + &mut self, + reset_bank: Arc, + next_leader_slot: Option<(Slot, Slot)>, + ) -> Result<(), SendError> { + self.send_and_wait_on_pending_message(PohServiceMessage::Reset { + reset_bank, + next_leader_slot, + }) + } + + pub fn reset( + &mut self, + reset_bank: Arc, + next_leader_slot: Option<(Slot, Slot)>, + ) -> Result<(), SendError> { + self.send_message(PohServiceMessage::Reset { + reset_bank, + next_leader_slot, + }) + } + + fn send_and_wait_on_pending_message( + &self, + message: PohServiceMessage, + ) -> Result<(), SendError> { + self.send_message(message)?; + while self.has_pending_message() { + core::hint::spin_loop(); + } + Ok(()) + } + + fn send_message(&self, message: PohServiceMessage) -> Result<(), SendError> { + self.pending_message.fetch_add(1, Ordering::AcqRel); + self.sender.send(message)?; + Ok(()) + } +} + +pub struct PohServiceMessageReceiver { + receiver: Receiver, + /// Used to indicate if there are any pending messages in the channel + /// OR that the receiver is currently processing. + /// This is necessary because crossbeam does not support peeking the + /// channel. 
+ pending_message: Arc, +} + +impl PohServiceMessageReceiver { + pub(crate) fn try_recv(&self) -> Result { + self.receiver + .try_recv() + .map(|message| PohServiceMessageGuard { + message_receiver: self, + message: Some(message), + }) + } +} + +pub(crate) struct PohServiceMessageGuard<'a> { + message_receiver: &'a PohServiceMessageReceiver, + message: Option, +} + +impl PohServiceMessageGuard<'_> { + pub(crate) fn take(&mut self) -> PohServiceMessage { + self.message.take().unwrap() + } +} + +impl Drop for PohServiceMessageGuard<'_> { + fn drop(&mut self) { + // If the message was taken (processed), decrement the pending count. + if self.message.is_none() { + self.message_receiver + .pending_message + .fetch_sub(1, Ordering::AcqRel); + } else { + panic!("PohServiceMessageGuard dropped without processing the message"); + } + } +} diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 092a23c72bded5..9b990b7ae857f9 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -13,7 +13,10 @@ #[cfg(feature = "dev-context-only-utils")] use qualifier_attr::qualifiers; use { - crate::{poh_service::PohService, transaction_recorder::TransactionRecorder}, + crate::{ + poh_controller::PohController, poh_service::PohService, + transaction_recorder::TransactionRecorder, + }, arc_swap::ArcSwapOption, crossbeam_channel::{unbounded, Receiver, SendError, Sender, TrySendError}, log::*, @@ -31,7 +34,10 @@ use { solana_transaction::versioned::VersionedTransaction, std::{ cmp, - sync::{atomic::AtomicBool, Arc, Mutex, RwLock}, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, Mutex, RwLock, + }, time::Instant, }, thiserror::Error, @@ -56,25 +62,16 @@ pub(crate) type Result = std::result::Result; pub type WorkingBankEntry = (Arc, (Entry, u64)); -#[derive(Debug, Clone)] -pub struct BankStart { - pub working_bank: Arc, - pub bank_creation_time: Arc, -} - -impl BankStart { - pub fn should_working_bank_still_be_processing_txs(&self) -> bool { - 
Bank::should_bank_still_be_processing_txs( - &self.bank_creation_time, - self.working_bank.ns_per_slot, - ) - } -} - // Sends the Result of the record operation, including the index in the slot of the first // transaction, if being tracked by WorkingBank type RecordResultSender = Sender>>; +#[derive(Debug)] +pub struct RecordSummary { + pub remaining_hashes_in_slot: u64, + pub starting_transaction_index: Option, +} + pub struct Record { pub mixins: Vec, pub transaction_batches: Vec>, @@ -169,7 +166,7 @@ impl PohRecorderMetrics { pub struct PohRecorder { pub(crate) poh: Arc>, - tick_height: u64, + tick_height: SharedTickHeight, clear_bank_signal: Option>, start_bank: Arc, // parent slot start_bank_active_descendants: Vec, @@ -184,7 +181,7 @@ pub struct PohRecorder { /// the `working_bank` field of this struct. shared_working_bank: SharedWorkingBank, working_bank_sender: Sender, - leader_first_tick_height: Option, + leader_first_tick_height: SharedLeaderFirstTickHeight, leader_last_tick_height: u64, // zero if none grace_ticks: u64, blockstore: Arc, @@ -199,6 +196,9 @@ pub struct PohRecorder { // Allocation to hold PohEntrys recorded into PoHStream. 
entries: Vec, track_transaction_indexes: bool, + + // Alpenglow related migration things + pub is_alpenglow_enabled: bool, } impl PohRecorder { @@ -263,7 +263,7 @@ impl PohRecorder { ( Self { poh, - tick_height, + tick_height: SharedTickHeight::new(tick_height), tick_cache: vec![], working_bank: None, shared_working_bank: SharedWorkingBank::empty(), @@ -272,7 +272,9 @@ impl PohRecorder { start_bank, start_bank_active_descendants: vec![], start_tick_height: tick_height + 1, - leader_first_tick_height, + leader_first_tick_height: SharedLeaderFirstTickHeight::new( + leader_first_tick_height, + ), leader_last_tick_height, grace_ticks, blockstore, @@ -285,6 +287,7 @@ impl PohRecorder { is_exited, entries: Vec::with_capacity(64), track_transaction_indexes: false, + is_alpenglow_enabled: false, }, working_bank_receiver, ) @@ -295,14 +298,15 @@ impl PohRecorder { } // synchronize PoH with a bank - pub fn reset(&mut self, reset_bank: Arc, next_leader_slot: Option<(Slot, Slot)>) { + pub(crate) fn reset(&mut self, reset_bank: Arc, next_leader_slot: Option<(Slot, Slot)>) { self.clear_bank(); self.reset_poh(reset_bank, true); let (leader_first_tick_height, leader_last_tick_height, grace_ticks) = Self::compute_leader_slot_tick_heights(next_leader_slot, self.ticks_per_slot); self.grace_ticks = grace_ticks; - self.leader_first_tick_height = leader_first_tick_height; + self.leader_first_tick_height + .store(leader_first_tick_height); self.leader_last_tick_height = leader_last_tick_height; } @@ -313,7 +317,7 @@ impl PohRecorder { bank_slot: Slot, mixins: Vec, transaction_batches: Vec>, - ) -> Result> { + ) -> Result { // Entries without transactions are used to track real-time passing in the ledger and // cannot be generated by `record()` assert!( @@ -333,6 +337,7 @@ impl PohRecorder { self.metrics.flush_cache_no_tick_us += flush_cache_us; flush_cache_res?; + let tick_height = self.tick_height(); // cannot change until next loop iteration. 
let working_bank = self .working_bank .as_mut() @@ -347,6 +352,8 @@ impl PohRecorder { let (mixed_in, record_mixin_us) = measure_us!(poh_lock.record_batches(&mixins, &mut self.entries)); self.metrics.record_us += record_mixin_us; + let remaining_hashes_in_slot = + poh_lock.remaining_hashes_in_slot(working_bank.bank.ticks_per_slot()); drop(poh_lock); @@ -366,7 +373,7 @@ impl PohRecorder { hash: entry.hash, transactions, }, - self.tick_height, // `record_batches` guarantees that mixins are **not** split across ticks. + tick_height, // `record_batches` guarantees that mixins are **not** split across ticks. ), ))); self.metrics.send_entry_us += send_batches_us; @@ -379,7 +386,10 @@ impl PohRecorder { transaction_index.saturating_add(num_transactions); working_bank.transaction_index = Some(next_starting_transaction_index); }); - return Ok(starting_transaction_index); + return Ok(RecordSummary { + remaining_hashes_in_slot, + starting_transaction_index, + }); } // record() might fail if the next PoH hash needs to be a tick. 
But that's ok, tick() @@ -404,10 +414,10 @@ impl PohRecorder { self.metrics.tick_lock_contention_us += tick_lock_contention_us; if let Some(poh_entry) = poh_entry { - self.tick_height += 1; - trace!("tick_height {}", self.tick_height); + self.tick_height.increment(); + trace!("tick_height {}", self.tick_height()); - if self.leader_first_tick_height.is_none() { + if self.leader_first_tick_height.load().is_none() { return; } @@ -417,7 +427,7 @@ impl PohRecorder { hash: poh_entry.hash, transactions: vec![], }, - self.tick_height, + self.tick_height(), )); let (_flush_res, flush_cache_and_tick_us) = measure_us!(self.flush_cache(true)); @@ -436,7 +446,7 @@ impl PohRecorder { } } - pub fn set_bank(&mut self, bank: BankWithScheduler) { + pub(crate) fn set_bank(&mut self, bank: BankWithScheduler) { assert!(self.working_bank.is_none()); let working_bank = WorkingBank { min_tick_height: bank.tick_height(), @@ -486,7 +496,8 @@ impl PohRecorder { let (leader_first_tick_height, leader_last_tick_height, grace_ticks) = Self::compute_leader_slot_tick_heights(next_leader_slot, self.ticks_per_slot); self.grace_ticks = grace_ticks; - self.leader_first_tick_height = leader_first_tick_height; + self.leader_first_tick_height + .store(leader_first_tick_height); self.leader_last_tick_height = leader_last_tick_height; datapoint_info!( @@ -519,7 +530,7 @@ impl PohRecorder { info!( "reset poh from: {},{},{} to: {},{}", poh_hash, - self.tick_height, + self.tick_height(), self.start_slot(), blockhash, reset_bank.slot() @@ -530,8 +541,9 @@ impl PohRecorder { self.start_bank = reset_bank; self.start_bank_active_descendants = vec![]; } - self.tick_height = (self.start_slot() + 1) * self.ticks_per_slot; - self.start_tick_height = self.tick_height + 1; + self.tick_height + .store((self.start_slot() + 1) * self.ticks_per_slot); + self.start_tick_height = self.tick_height() + 1; } // Flush cache will delay flushing the cache for a bank until it past the WorkingBank::min_tick_height @@ -545,10 +557,10 
@@ impl PohRecorder { .working_bank .as_ref() .ok_or(PohRecorderError::MaxHeightReached)?; - if self.tick_height < working_bank.min_tick_height { + if self.tick_height() < working_bank.min_tick_height { return Err(PohRecorderError::MinHeightNotReached); } - if tick && self.tick_height == working_bank.min_tick_height { + if tick && self.tick_height() == working_bank.min_tick_height { return Err(PohRecorderError::MinHeightNotReached); } @@ -578,7 +590,7 @@ impl PohRecorder { } } } - if self.tick_height >= working_bank.max_tick_height { + if self.tick_height() >= working_bank.max_tick_height { info!( "poh_record: max_tick_height {} reached, clearing working_bank {}", working_bank.max_tick_height, @@ -590,7 +602,7 @@ impl PohRecorder { self.clear_bank(); } if send_result.is_err() { - info!("WorkingBank::sender disconnected {:?}", send_result); + info!("WorkingBank::sender disconnected {send_result:?}"); // revert the cache, but clear the working bank self.clear_bank(); } else { @@ -605,9 +617,10 @@ impl PohRecorder { self.has_bank() || self .leader_first_tick_height + .load() .is_some_and(|leader_first_tick_height| { - self.tick_height + within_next_n_ticks >= leader_first_tick_height - && self.tick_height <= self.leader_last_tick_height + self.tick_height() + within_next_n_ticks >= leader_first_tick_height + && self.tick_height() <= self.leader_last_tick_height }) } @@ -625,7 +638,7 @@ impl PohRecorder { // bank and generally indicates what tick height has already been // reached so use the next tick height to determine which slot poh is // ticking through. 
- let next_tick_height = self.tick_height.saturating_add(1); + let next_tick_height = self.tick_height().saturating_add(1); self.slot_for_tick_height(next_tick_height) } @@ -649,19 +662,20 @@ impl PohRecorder { self.working_bank.as_ref().map(|w| w.bank.clone()) } - pub fn bank_start(&self) -> Option { - self.working_bank.as_ref().map(|w| BankStart { - working_bank: w.bank.clone(), - bank_creation_time: w.start.clone(), - }) - } - pub fn has_bank(&self) -> bool { self.working_bank.is_some() } pub fn tick_height(&self) -> u64 { - self.tick_height + self.tick_height.load() + } + + pub fn shared_tick_height(&self) -> SharedTickHeight { + self.tick_height.clone() + } + + pub fn shared_leader_first_tick_height(&self) -> SharedLeaderFirstTickHeight { + self.leader_first_tick_height.clone() } pub fn ticks_per_slot(&self) -> u64 { @@ -684,16 +698,17 @@ impl PohRecorder { /// leaders needed to be skipped). pub fn reached_leader_slot(&self, my_pubkey: &Pubkey) -> PohLeaderStatus { trace!( - "tick_height {}, start_tick_height {}, leader_first_tick_height {:?}, grace_ticks {}, has_bank {}", - self.tick_height, + "tick_height {}, start_tick_height {}, leader_first_tick_height {:?}, grace_ticks {}, \ + has_bank {}", + self.tick_height(), self.start_tick_height, - self.leader_first_tick_height, + self.leader_first_tick_height.load(), self.grace_ticks, self.has_bank() ); let current_poh_slot = self.current_poh_slot(); - let Some(leader_first_tick_height) = self.leader_first_tick_height else { + let Some(leader_first_tick_height) = self.leader_first_tick_height.load() else { // No next leader slot, so no leader slot has been reached. return PohLeaderStatus::NotReached; }; @@ -730,12 +745,12 @@ impl PohRecorder { } let ideal_target_tick_height = leader_first_tick_height.saturating_sub(1); - if self.tick_height < ideal_target_tick_height { + if self.tick_height() < ideal_target_tick_height { // We haven't ticked to our leader slot yet. 
return false; } - if self.tick_height >= ideal_target_tick_height.saturating_add(self.grace_ticks) { + if self.tick_height() >= ideal_target_tick_height.saturating_add(self.grace_ticks) { // We have finished waiting for grace ticks. return true; } @@ -894,6 +909,7 @@ impl PohRecorder { } } +#[allow(clippy::type_complexity)] fn do_create_test_recorder( bank: Arc, blockstore: Arc, @@ -903,6 +919,7 @@ fn do_create_test_recorder( ) -> ( Arc, Arc>, + PohController, TransactionRecorder, PohService, Receiver, @@ -934,6 +951,7 @@ fn do_create_test_recorder( let (record_sender, record_receiver) = unbounded(); let transaction_recorder = TransactionRecorder::new(record_sender, exit.clone()); let poh_recorder = Arc::new(RwLock::new(poh_recorder)); + let (poh_controller, poh_service_message_receiver) = PohController::new(); let poh_service = PohService::new( poh_recorder.clone(), &poh_config, @@ -942,17 +960,20 @@ fn do_create_test_recorder( crate::poh_service::DEFAULT_PINNED_CPU_CORE, crate::poh_service::DEFAULT_HASHES_PER_BATCH, record_receiver, + poh_service_message_receiver, ); ( exit, poh_recorder, + poh_controller, transaction_recorder, poh_service, entry_receiver, ) } +#[allow(clippy::type_complexity)] pub fn create_test_recorder( bank: Arc, blockstore: Arc, @@ -961,6 +982,7 @@ pub fn create_test_recorder( ) -> ( Arc, Arc>, + PohController, TransactionRecorder, PohService, Receiver, @@ -968,6 +990,7 @@ pub fn create_test_recorder( do_create_test_recorder(bank, blockstore, poh_config, leader_schedule_cache, false) } +#[allow(clippy::type_complexity)] pub fn create_test_recorder_with_index_tracking( bank: Arc, blockstore: Arc, @@ -976,6 +999,7 @@ pub fn create_test_recorder_with_index_tracking( ) -> ( Arc, Arc>, + PohController, TransactionRecorder, PohService, Receiver, @@ -993,19 +1017,93 @@ impl SharedWorkingBank { self.0.load_full() } - fn store(&self, bank: Arc) { + // Mutable access not needed for this function. 
+ // However we use it to guarantee only used when PohRecorder is + // write locked. + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn store(&mut self, bank: Arc) { self.0.store(Some(bank)); } - fn clear(&self) { + // Mutable access not needed for this function. + // However we use it to guarantee only used when PohRecorder is + // write locked. + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn clear(&mut self) { self.0.store(None); } + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] fn empty() -> Self { Self(Arc::new(ArcSwapOption::empty())) } } +/// Wrapper around a atomic-u64 that prevents modifying outside +/// of `PohRecorder`. +#[derive(Clone)] +pub struct SharedTickHeight(Arc); + +impl SharedTickHeight { + pub fn load(&self) -> u64 { + self.0.load(Ordering::Acquire) + } + + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn new(tick_height: u64) -> Self { + Self(Arc::new(AtomicU64::new(tick_height))) + } + + // Mutable access not needed for this function. + // However we use it to guarantee only used when PohRecorder is + // write locked. + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn store(&mut self, tick_height: u64) { + self.0.store(tick_height, Ordering::Release); + } + + // Mutable access not needed for this function. + // However we use it to guarantee only used when PohRecorder is + // write locked. + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn increment(&mut self) { + self.0.fetch_add(1, Ordering::Release); + } +} + +/// Wrapper around a atomic-u64 that may be None. +// Uses the flag of u64::MAX to indicate None; this does not +// need to be observable outside of PohRecorder. 
+#[derive(Clone)] +pub struct SharedLeaderFirstTickHeight(Arc); +const SHARED_LEADER_FIRST_TICK_HEIGHT_NONE: u64 = u64::MAX; + +impl SharedLeaderFirstTickHeight { + pub fn load(&self) -> Option { + let v = self.0.load(Ordering::Acquire); + if v == SHARED_LEADER_FIRST_TICK_HEIGHT_NONE { + None + } else { + Some(v) + } + } + + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn new(tick_height: Option) -> Self { + let v = tick_height.unwrap_or(SHARED_LEADER_FIRST_TICK_HEIGHT_NONE); + Self(Arc::new(AtomicU64::new(v))) + } + + // Mutable access not needed for this function. + // However we use it to guarantee only used when PohRecorder is + // write locked. + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + fn store(&mut self, tick_height: Option) { + let v = tick_height.unwrap_or(SHARED_LEADER_FIRST_TICK_HEIGHT_NONE); + self.0.store(v, Ordering::Release); + } +} + #[cfg(test)] mod tests { use { @@ -1045,7 +1143,7 @@ mod tests { poh_recorder.tick(); assert_eq!(poh_recorder.tick_cache.len(), 1); assert_eq!(poh_recorder.tick_cache[0].1, 1); - assert_eq!(poh_recorder.tick_height, 1); + assert_eq!(poh_recorder.tick_height(), 1); } #[test] @@ -1072,7 +1170,7 @@ mod tests { poh_recorder.tick(); assert_eq!(poh_recorder.tick_cache.len(), 2); assert_eq!(poh_recorder.tick_cache[1].1, 2); - assert_eq!(poh_recorder.tick_height, 2); + assert_eq!(poh_recorder.tick_height(), 2); } #[test] @@ -1172,7 +1270,7 @@ mod tests { // all ticks are sent after height > min let tick_height_before = poh_recorder.tick_height(); poh_recorder.tick(); - assert_eq!(poh_recorder.tick_height, tick_height_before + 1); + assert_eq!(poh_recorder.tick_height(), tick_height_before + 1); assert_eq!(poh_recorder.tick_cache.len(), 0); let mut num_entries = 0; while let Ok((wbank, (_entry, _tick_height))) = entry_receiver.try_recv() { @@ -1212,12 +1310,12 @@ mod tests { poh_recorder.tick_cache.last().unwrap().1, bank.max_tick_height() + 1 ); - 
assert_eq!(poh_recorder.tick_height, bank.max_tick_height() + 1); + assert_eq!(poh_recorder.tick_height(), bank.max_tick_height() + 1); poh_recorder.set_bank_for_test(bank.clone()); poh_recorder.tick(); - assert_eq!(poh_recorder.tick_height, bank.max_tick_height() + 2); + assert_eq!(poh_recorder.tick_height(), bank.max_tick_height() + 2); assert!(poh_recorder.working_bank.is_none()); let mut num_entries = 0; while entry_receiver.try_recv().is_ok() { @@ -1340,7 +1438,7 @@ mod tests { // Check record succeeds on boundary condition where // poh_recorder.tick height == poh_recorder.working_bank.min_tick_height - assert_eq!(poh_recorder.tick_height, min_tick_height); + assert_eq!(poh_recorder.tick_height(), min_tick_height); let tx = test_tx(); let h1 = hash(b"hello world!"); assert!(poh_recorder @@ -1379,7 +1477,7 @@ mod tests { ); poh_recorder.set_bank_for_test(bank.clone()); - let num_ticks_to_max = bank.max_tick_height() - poh_recorder.tick_height; + let num_ticks_to_max = bank.max_tick_height() - poh_recorder.tick_height(); for _ in 0..num_ticks_to_max { poh_recorder.tick(); } @@ -1433,6 +1531,7 @@ mod tests { let record_result = poh_recorder .record(bank.slot(), vec![h1], vec![vec![tx0.into(), tx1.into()]]) .unwrap() + .starting_transaction_index .unwrap(); assert_eq!(record_result, 0); assert_eq!( @@ -1447,11 +1546,12 @@ mod tests { let tx = test_tx(); let h2 = hash(b"foobar"); - let record_result = poh_recorder + let starting_transaction_index = poh_recorder .record(bank.slot(), vec![h2], vec![vec![tx.into()]]) .unwrap() + .starting_transaction_index .unwrap(); - assert_eq!(record_result, 2); + assert_eq!(starting_transaction_index, 2); assert_eq!( poh_recorder .working_bank @@ -1493,7 +1593,7 @@ mod tests { for _ in 0..remaining_ticks_to_min { poh_recorder.tick(); } - assert_eq!(poh_recorder.tick_height, remaining_ticks_to_min); + assert_eq!(poh_recorder.tick_height(), remaining_ticks_to_min); assert_eq!( poh_recorder.tick_cache.len(), remaining_ticks_to_min as 
usize @@ -1590,11 +1690,11 @@ mod tests { poh_recorder.tick(); poh_recorder.tick(); assert_eq!(poh_recorder.tick_cache.len(), 4); - assert_eq!(poh_recorder.tick_height, 4); + assert_eq!(poh_recorder.tick_height(), 4); poh_recorder.reset(bank, Some((4, 4))); // parent slot 0 implies tick_height of 3 assert_eq!(poh_recorder.tick_cache.len(), 0); poh_recorder.tick(); - assert_eq!(poh_recorder.tick_height, DEFAULT_TICKS_PER_SLOT + 1); + assert_eq!(poh_recorder.tick_height(), DEFAULT_TICKS_PER_SLOT + 1); } #[test] @@ -1846,7 +1946,7 @@ mod tests { assert!(!poh_recorder.reached_leader_tick(&leader_b_pubkey, leader_b_start_tick)); // Tick through Leader A's remaining slots. - for _ in poh_recorder.tick_height..ticks_in_leader_slot_set { + for _ in poh_recorder.tick_height()..ticks_in_leader_slot_set { poh_recorder.tick(); } diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index f10ba29620beae..a3e6e2cd20a191 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -1,7 +1,10 @@ //! The `poh_service` module implements a service that records the passing of //! 
"ticks", a measure of time in the PoH stream use { - crate::poh_recorder::{PohRecorder, Record}, + crate::{ + poh_controller::{PohServiceMessage, PohServiceMessageGuard, PohServiceMessageReceiver}, + poh_recorder::{PohRecorder, Record}, + }, crossbeam_channel::Receiver, log::*, solana_clock::DEFAULT_HASHES_PER_SECOND, @@ -105,6 +108,7 @@ impl PohService { pinned_cpu_core: usize, hashes_per_batch: u64, record_receiver: Receiver, + poh_service_receiver: PohServiceMessageReceiver, ) -> Self { let poh_config = poh_config.clone(); let tick_producer = Builder::new() @@ -117,6 +121,7 @@ impl PohService { &poh_config, &poh_exit, record_receiver, + poh_service_receiver, ); } else { Self::short_lived_low_power_tick_producer( @@ -124,6 +129,7 @@ impl PohService { &poh_config, &poh_exit, record_receiver, + poh_service_receiver, ); } } else { @@ -139,6 +145,7 @@ impl PohService { ticks_per_slot, hashes_per_batch, record_receiver, + poh_service_receiver, Self::target_ns_per_tick( ticks_per_slot, poh_config.target_tick_duration.as_nanos() as u64, @@ -168,9 +175,12 @@ impl PohService { poh_config: &PohConfig, poh_exit: &AtomicBool, record_receiver: Receiver, + poh_service_receiver: PohServiceMessageReceiver, ) { let mut last_tick = Instant::now(); while !poh_exit.load(Ordering::Relaxed) { + let service_message = poh_service_receiver.try_recv(); + let remaining_tick_time = poh_config .target_tick_duration .saturating_sub(last_tick.elapsed()); @@ -183,6 +193,10 @@ impl PohService { last_tick = Instant::now(); poh_recorder.write().unwrap().tick(); } + + if let Ok(service_message) = service_message { + Self::handle_service_message(&poh_recorder, service_message); + } } } @@ -195,11 +209,13 @@ impl PohService { if let Ok(record) = record { if record .sender - .send(poh_recorder.write().unwrap().record( - record.slot, - record.mixins, - record.transaction_batches, - )) + .send( + poh_recorder + .write() + .unwrap() + .record(record.slot, record.mixins, record.transaction_batches) + 
.map(|summary| summary.starting_transaction_index), + ) .is_err() { panic!("Error returning mixin hash"); @@ -212,12 +228,15 @@ impl PohService { poh_config: &PohConfig, poh_exit: &AtomicBool, record_receiver: Receiver, + poh_service_receiver: PohServiceMessageReceiver, ) { let mut warned = false; let mut elapsed_ticks = 0; let mut last_tick = Instant::now(); let num_ticks = poh_config.target_tick_count.unwrap(); while elapsed_ticks < num_ticks { + let service_message = poh_service_receiver.try_recv(); + let remaining_tick_time = poh_config .target_tick_duration .saturating_sub(last_tick.elapsed()); @@ -235,6 +254,10 @@ impl PohService { warned = true; warn!("exit signal is ignored because PohService is scheduled to exit soon"); } + + if let Ok(service_message) = service_message { + Self::handle_service_message(&poh_recorder, service_message); + } } } @@ -263,7 +286,9 @@ impl PohService { record.mixins, std::mem::take(&mut record.transaction_batches), ); - let (send_res, send_record_result_us) = measure_us!(record.sender.send(res)); + let (send_res, send_record_result_us) = measure_us!(record + .sender + .send(res.map(|summary| summary.starting_transaction_index))); debug_assert!(send_res.is_ok(), "Record wasn't sent."); timing.total_send_record_result_us += send_record_result_us; @@ -333,12 +358,15 @@ impl PohService { ticks_per_slot: u64, hashes_per_batch: u64, record_receiver: Receiver, + poh_service_receiver: PohServiceMessageReceiver, target_ns_per_tick: u64, ) { let poh = poh_recorder.read().unwrap().poh.clone(); let mut timing = PohTiming::new(); let mut next_record = None; + loop { + let service_message = poh_service_receiver.try_recv(); let should_tick = Self::record_or_hash( &mut next_record, &poh_recorder, @@ -367,6 +395,30 @@ impl PohService { break; } } + + if let Ok(service_message) = service_message { + Self::handle_service_message(&poh_recorder, service_message); + } + } + } + + fn handle_service_message( + poh_recorder: &RwLock, + mut 
service_message: PohServiceMessageGuard, + ) { + { + let mut recorder = poh_recorder.write().unwrap(); + match service_message.take() { + PohServiceMessage::Reset { + reset_bank, + next_leader_slot, + } => { + recorder.reset(reset_bank, next_leader_slot); + } + PohServiceMessage::SetBank { bank } => { + recorder.set_bank(bank); + } + } } } @@ -379,7 +431,7 @@ impl PohService { mod tests { use { super::*, - crate::poh_recorder::PohRecorderError::MaxHeightReached, + crate::{poh_controller::PohController, poh_recorder::PohRecorderError::MaxHeightReached}, crossbeam_channel::unbounded, rand::{thread_rng, Rng}, solana_clock::{DEFAULT_HASHES_PER_TICK, DEFAULT_MS_PER_SLOT}, @@ -510,6 +562,7 @@ mod tests { .map(|x| x.parse().unwrap()) .unwrap_or(DEFAULT_HASHES_PER_BATCH); let (_record_sender, record_receiver) = unbounded(); + let (_poh_controller, poh_service_message_receiver) = PohController::new(); let poh_service = PohService::new( poh_recorder.clone(), &poh_config, @@ -518,6 +571,7 @@ mod tests { DEFAULT_PINNED_CPU_CORE, hashes_per_batch, record_receiver, + poh_service_message_receiver, ); poh_recorder.write().unwrap().set_bank_for_test(bank); diff --git a/precompiles/Cargo.toml b/precompiles/Cargo.toml index c2a5a854af7696..d67c1f2d7c4a98 100644 --- a/precompiles/Cargo.toml +++ b/precompiles/Cargo.toml @@ -19,7 +19,7 @@ agave-feature-set = { workspace = true } bincode = { workspace = true } digest = { workspace = true } ed25519-dalek = { workspace = true } -libsecp256k1 = { workspace = true } +libsecp256k1 = { workspace = true, features = ["hmac"] } openssl = { workspace = true } sha3 = { workspace = true } solana-ed25519-program = { workspace = true } @@ -35,7 +35,7 @@ bytemuck = { workspace = true } hex = { workspace = true } rand0-7 = { workspace = true } solana-instruction = { workspace = true } -solana-keccak-hasher = { workspace = true } +solana-keccak-hasher = { workspace = true, features = ["sha3"] } solana-logger = { workspace = true } 
solana-secp256k1-program = { workspace = true, features = ["bincode"] } diff --git a/precompiles/src/secp256k1.rs b/precompiles/src/secp256k1.rs index f647aa4e08d5c0..13be6de184a3ba 100644 --- a/precompiles/src/secp256k1.rs +++ b/precompiles/src/secp256k1.rs @@ -355,7 +355,7 @@ pub mod tests { hasher.result() }; - let secp_message = libsecp256k1::Message::parse(&message_hash.0); + let secp_message = libsecp256k1::Message::parse(message_hash.as_bytes()); let (signature, recovery_id) = libsecp256k1::sign(&secp_message, &secret_key); // Flip the S value in the signature to make a different but valid signature. diff --git a/program-binaries/Cargo.toml b/program-binaries/Cargo.toml new file mode 100644 index 00000000000000..2c06b3ac629b88 --- /dev/null +++ b/program-binaries/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "solana-program-binaries" +description = "Prebuilt SPL and Core BPF programs" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bincode = { workspace = true } +serde = { workspace = true } +solana-account = { workspace = true } +solana-loader-v3-interface = { workspace = true, features = ["bincode", "serde"] } +solana-pubkey = { workspace = true } +solana-rent = { workspace = true } +solana-sdk-ids = { workspace = true } +spl-generic-token = { workspace = true } diff --git a/program-test/src/programs.rs b/program-binaries/src/lib.rs similarity index 83% rename from program-test/src/programs.rs rename to program-binaries/src/lib.rs index cfe3a4c1cd422d..4776a98d33d3ac 100644 --- a/program-test/src/programs.rs +++ b/program-binaries/src/lib.rs @@ -1,5 +1,7 @@ +#![allow(clippy::arithmetic_side_effects)] + use { - solana_account::{Account, AccountSharedData}, + solana_account::{Account, AccountSharedData, ReadableAccount}, solana_loader_v3_interface::{get_program_data_address, 
state::UpgradeableLoaderState}, solana_pubkey::Pubkey, solana_rent::Rent, @@ -61,6 +63,11 @@ static CORE_BPF_PROGRAMS: &[(Pubkey, Option, &[u8])] = &[ None, include_bytes!("programs/core_bpf_feature_gate-0.0.1.so"), ), + ( + solana_sdk_ids::stake::ID, + None, + include_bytes!("programs/core_bpf_stake-1.0.0.so"), + ), // Add more programs here post-migration... ]; @@ -75,7 +82,7 @@ fn bpf_loader_program_account(program_id: &Pubkey, elf: &[u8], rent: &Rent) -> ( data: elf.to_vec(), owner: bpf_loader::id(), executable: true, - rent_epoch: 0, + rent_epoch: u64::MAX, }, ) } @@ -86,7 +93,7 @@ fn bpf_loader_program_account(program_id: &Pubkey, elf: &[u8], rent: &Rent) -> ( /// The second tuple is the program data account. It contains the program data /// address and an account with the program data - a valid BPF Loader Upgradeable /// program data account containing the ELF. -pub(crate) fn bpf_loader_upgradeable_program_accounts( +pub fn bpf_loader_upgradeable_program_accounts( program_id: &Pubkey, elf: &[u8], rent: &Rent, @@ -104,7 +111,7 @@ pub(crate) fn bpf_loader_upgradeable_program_accounts( data, owner: bpf_loader_upgradeable::id(), executable: true, - rent_epoch: 0, + rent_epoch: u64::MAX, } }; let programdata_account = { @@ -121,7 +128,7 @@ pub(crate) fn bpf_loader_upgradeable_program_accounts( data, owner: bpf_loader_upgradeable::id(), executable: false, - rent_epoch: 0, + rent_epoch: u64::MAX, } }; [ @@ -167,3 +174,27 @@ where }) .collect() } + +pub fn by_id(program_id: &Pubkey, rent: &Rent) -> Option> { + let programs = spl_programs(rent); + if let Some(i) = programs.iter().position(|(key, _)| key == program_id) { + let n = num_accounts(programs[i].1.owner()); + return Some(programs.into_iter().skip(i).take(n).collect()); + } + + let programs = core_bpf_programs(rent, |_| true); + if let Some(i) = programs.iter().position(|(key, _)| key == program_id) { + let n = num_accounts(programs[i].1.owner()); + return Some(programs.into_iter().skip(i).take(n).collect()); 
+ } + + None +} + +fn num_accounts(owner_id: &Pubkey) -> usize { + if *owner_id == bpf_loader_upgradeable::id() { + 2 + } else { + 1 + } +} diff --git a/program-test/src/programs/core_bpf_address_lookup_table-3.0.0.so b/program-binaries/src/programs/core_bpf_address_lookup_table-3.0.0.so similarity index 100% rename from program-test/src/programs/core_bpf_address_lookup_table-3.0.0.so rename to program-binaries/src/programs/core_bpf_address_lookup_table-3.0.0.so diff --git a/program-test/src/programs/core_bpf_config-3.0.0.so b/program-binaries/src/programs/core_bpf_config-3.0.0.so similarity index 100% rename from program-test/src/programs/core_bpf_config-3.0.0.so rename to program-binaries/src/programs/core_bpf_config-3.0.0.so diff --git a/program-test/src/programs/core_bpf_feature_gate-0.0.1.so b/program-binaries/src/programs/core_bpf_feature_gate-0.0.1.so similarity index 100% rename from program-test/src/programs/core_bpf_feature_gate-0.0.1.so rename to program-binaries/src/programs/core_bpf_feature_gate-0.0.1.so diff --git a/program-binaries/src/programs/core_bpf_stake-1.0.0.so b/program-binaries/src/programs/core_bpf_stake-1.0.0.so new file mode 100755 index 00000000000000..e0dc8bdb51d163 Binary files /dev/null and b/program-binaries/src/programs/core_bpf_stake-1.0.0.so differ diff --git a/program-test/src/programs/spl_associated_token_account-1.1.1.so b/program-binaries/src/programs/spl_associated_token_account-1.1.1.so similarity index 100% rename from program-test/src/programs/spl_associated_token_account-1.1.1.so rename to program-binaries/src/programs/spl_associated_token_account-1.1.1.so diff --git a/program-test/src/programs/spl_memo-1.0.0.so b/program-binaries/src/programs/spl_memo-1.0.0.so similarity index 100% rename from program-test/src/programs/spl_memo-1.0.0.so rename to program-binaries/src/programs/spl_memo-1.0.0.so diff --git a/program-test/src/programs/spl_memo-3.0.0.so b/program-binaries/src/programs/spl_memo-3.0.0.so similarity index 100% 
rename from program-test/src/programs/spl_memo-3.0.0.so rename to program-binaries/src/programs/spl_memo-3.0.0.so diff --git a/program-test/src/programs/spl_token-3.5.0.so b/program-binaries/src/programs/spl_token-3.5.0.so similarity index 100% rename from program-test/src/programs/spl_token-3.5.0.so rename to program-binaries/src/programs/spl_token-3.5.0.so diff --git a/program-test/src/programs/spl_token_2022-8.0.0.so b/program-binaries/src/programs/spl_token_2022-8.0.0.so similarity index 100% rename from program-test/src/programs/spl_token_2022-8.0.0.so rename to program-binaries/src/programs/spl_token_2022-8.0.0.so diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index a74e780b573078..b10814821e6e14 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -20,19 +20,19 @@ name = "solana_program_runtime" dev-context-only-utils = [] dummy-for-ci-check = ["metrics"] frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] -metrics = ["dep:solana-metrics"] -shuttle-test = ["solana-type-overrides/shuttle-test", "solana-sbpf/shuttle-test"] +metrics = [] +shuttle-test = ["solana-sbpf/shuttle-test", "solana-svm-type-overrides/shuttle-test"] [dependencies] base64 = { workspace = true } bincode = { workspace = true } -enum-iterator = { workspace = true } itertools = { workspace = true } log = { workspace = true } percentage = { workspace = true } rand = { workspace = true } serde = { workspace = true } solana-account = { workspace = true, features = ["bincode"] } +solana-account-info = { workspace = true } solana-clock = { workspace = true } solana-epoch-rewards = { workspace = true } solana-epoch-schedule = { workspace = true } @@ -46,33 +46,38 @@ solana-frozen-abi-macro = { workspace = true, optional = true, features = [ solana-hash = { workspace = true } solana-instruction = { workspace = true } solana-last-restart-slot = { workspace = true } -solana-log-collector = { workspace = true } -solana-measure = { workspace 
= true } -solana-metrics = { workspace = true, optional = true } +solana-loader-v3-interface = { workspace = true } solana-program-entrypoint = { workspace = true } solana-pubkey = { workspace = true } solana-rent = { workspace = true } -solana-sbpf = { workspace = true } +solana-sbpf = { workspace = true, features = ["jit"] } solana-sdk-ids = { workspace = true } solana-slot-hashes = { workspace = true } solana-stable-layout = { workspace = true } +solana-stake-interface = { workspace = true, features = ["bincode", "sysvar"] } solana-svm-callback = { workspace = true } solana-svm-feature-set = { workspace = true } +solana-svm-log-collector = { workspace = true } +solana-svm-measure = { workspace = true } +solana-svm-timings = { workspace = true } solana-svm-transaction = { workspace = true } +solana-svm-type-overrides = { workspace = true } solana-system-interface = { workspace = true } -solana-sysvar = { workspace = true } +solana-sysvar = { workspace = true, features = ["bincode"] } solana-sysvar-id = { workspace = true } -solana-timings = { workspace = true } solana-transaction-context = { workspace = true } -solana-type-overrides = { workspace = true } thiserror = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } solana-account-info = { workspace = true } solana-instruction = { workspace = true, features = ["bincode"] } +solana-instruction-error = { workspace = true, features = ["serde"] } +solana-keypair = { workspace = true } solana-program-runtime = { path = ".", features = ["dev-context-only-utils"] } solana-pubkey = { workspace = true, features = ["rand"] } +solana-signer = { workspace = true } +solana-transaction = { workspace = true, features = ["dev-context-only-utils"] } solana-transaction-context = { workspace = true, features = [ "dev-context-only-utils", ] } diff --git a/program-runtime/src/cpi.rs b/program-runtime/src/cpi.rs new file mode 100644 index 00000000000000..16be9678776de9 --- /dev/null +++ 
b/program-runtime/src/cpi.rs @@ -0,0 +1,2299 @@ +//! Cross-Program Invocation (CPI) error types + +use { + crate::{ + invoke_context::{InvokeContext, SerializedAccountMetadata}, + memory::{translate_slice, translate_type, translate_type_mut_for_cpi, translate_vm_slice}, + serialization::{create_memory_region_of_account, modify_memory_region_of_account}, + }, + solana_account_info::AccountInfo, + solana_instruction::{error::InstructionError, AccountMeta, Instruction}, + solana_loader_v3_interface::instruction as bpf_loader_upgradeable, + solana_program_entrypoint::MAX_PERMITTED_DATA_INCREASE, + solana_pubkey::{Pubkey, PubkeyError, MAX_SEEDS}, + solana_sbpf::{ebpf, memory_region::MemoryMapping}, + solana_sdk_ids::{bpf_loader, bpf_loader_deprecated, native_loader}, + solana_stable_layout::stable_instruction::StableInstruction, + solana_svm_log_collector::ic_msg, + solana_svm_measure::measure::Measure, + solana_svm_timings::ExecuteTimings, + solana_transaction_context::{ + vm_slice::VmSlice, BorrowedInstructionAccount, IndexOfAccount, + MAX_ACCOUNTS_PER_INSTRUCTION, MAX_INSTRUCTION_DATA_LEN, + }, + std::mem, + thiserror::Error, +}; + +/// CPI-specific error types +#[derive(Debug, Error, PartialEq, Eq)] +pub enum CpiError { + #[error("Invalid pointer")] + InvalidPointer, + #[error("Too many signers")] + TooManySigners, + #[error("Could not create program address with signer seeds: {0}")] + BadSeeds(PubkeyError), + #[error("InvalidLength")] + InvalidLength, + #[error("Invoked an instruction with too many accounts ({num_accounts} > {max_accounts})")] + MaxInstructionAccountsExceeded { + num_accounts: u64, + max_accounts: u64, + }, + #[error("Invoked an instruction with data that is too large ({data_len} > {max_data_len})")] + MaxInstructionDataLenExceeded { data_len: u64, max_data_len: u64 }, + #[error( + "Invoked an instruction with too many account info's ({num_account_infos} > \ + {max_account_infos})" + )] + MaxInstructionAccountInfosExceeded { + num_account_infos: 
u64, + max_account_infos: u64, + }, + #[error("Program {0} not supported by inner instructions")] + ProgramNotSupported(Pubkey), +} + +type Error = Box; + +const SUCCESS: u64 = 0; +/// Maximum signers +const MAX_SIGNERS: usize = 16; + +/// Rust representation of C's SolInstruction +#[derive(Debug)] +#[repr(C)] +struct SolInstruction { + pub program_id_addr: u64, + pub accounts_addr: u64, + pub accounts_len: u64, + pub data_addr: u64, + pub data_len: u64, +} + +/// Rust representation of C's SolAccountMeta +#[derive(Debug)] +#[repr(C)] +struct SolAccountMeta { + pub pubkey_addr: u64, + pub is_writable: bool, + pub is_signer: bool, +} + +/// Rust representation of C's SolAccountInfo +#[derive(Debug)] +#[repr(C)] +struct SolAccountInfo { + pub key_addr: u64, + pub lamports_addr: u64, + pub data_len: u64, + pub data_addr: u64, + pub owner_addr: u64, + pub rent_epoch: u64, + pub is_signer: bool, + pub is_writable: bool, + pub executable: bool, +} + +/// Rust representation of C's SolSignerSeed +#[derive(Debug)] +#[repr(C)] +struct SolSignerSeedC { + pub addr: u64, + pub len: u64, +} + +/// Rust representation of C's SolSignerSeeds +#[derive(Debug)] +#[repr(C)] +struct SolSignerSeedsC { + pub addr: u64, + pub len: u64, +} + +/// Maximum number of account info structs that can be used in a single CPI invocation +const MAX_CPI_ACCOUNT_INFOS: usize = 128; + +/// Check that an account info pointer field points to the expected address +fn check_account_info_pointer( + invoke_context: &InvokeContext, + vm_addr: u64, + expected_vm_addr: u64, + field: &str, +) -> Result<(), Error> { + if vm_addr != expected_vm_addr { + ic_msg!( + invoke_context, + "Invalid account info pointer `{}': {:#x} != {:#x}", + field, + vm_addr, + expected_vm_addr + ); + return Err(Box::new(CpiError::InvalidPointer)); + } + Ok(()) +} + +/// Check that an instruction's account and data lengths are within limits +fn check_instruction_size(num_accounts: usize, data_len: usize) -> Result<(), Error> { + if 
num_accounts > MAX_ACCOUNTS_PER_INSTRUCTION { + return Err(Box::new(CpiError::MaxInstructionAccountsExceeded { + num_accounts: num_accounts as u64, + max_accounts: MAX_ACCOUNTS_PER_INSTRUCTION as u64, + })); + } + if data_len > MAX_INSTRUCTION_DATA_LEN { + return Err(Box::new(CpiError::MaxInstructionDataLenExceeded { + data_len: data_len as u64, + max_data_len: MAX_INSTRUCTION_DATA_LEN as u64, + })); + } + Ok(()) +} + +/// Check that the number of account infos is within the CPI limit +fn check_account_infos( + num_account_infos: usize, + invoke_context: &mut InvokeContext, +) -> Result<(), Error> { + let max_cpi_account_infos = if invoke_context + .get_feature_set() + .increase_tx_account_lock_limit + { + MAX_CPI_ACCOUNT_INFOS + } else { + 64 + }; + let num_account_infos = num_account_infos as u64; + let max_account_infos = max_cpi_account_infos as u64; + if num_account_infos > max_account_infos { + return Err(Box::new(CpiError::MaxInstructionAccountInfosExceeded { + num_account_infos, + max_account_infos, + })); + } + Ok(()) +} + +/// Check whether a program is authorized for CPI +fn check_authorized_program( + program_id: &Pubkey, + instruction_data: &[u8], + invoke_context: &InvokeContext, +) -> Result<(), Error> { + if native_loader::check_id(program_id) + || bpf_loader::check_id(program_id) + || bpf_loader_deprecated::check_id(program_id) + || (solana_sdk_ids::bpf_loader_upgradeable::check_id(program_id) + && !(bpf_loader_upgradeable::is_upgrade_instruction(instruction_data) + || bpf_loader_upgradeable::is_set_authority_instruction(instruction_data) + || (invoke_context + .get_feature_set() + .enable_bpf_loader_set_authority_checked_ix + && bpf_loader_upgradeable::is_set_authority_checked_instruction( + instruction_data, + )) + || (invoke_context + .get_feature_set() + .enable_extend_program_checked + && bpf_loader_upgradeable::is_extend_program_checked_instruction( + instruction_data, + )) + || bpf_loader_upgradeable::is_close_instruction(instruction_data))) 
+ || invoke_context.is_precompile(program_id) + { + return Err(Box::new(CpiError::ProgramNotSupported(*program_id))); + } + Ok(()) +} + +/// Host side representation of AccountInfo or SolAccountInfo passed to the CPI syscall. +/// +/// At the start of a CPI, this can be different from the data stored in the +/// corresponding BorrowedAccount, and needs to be synched. +pub struct CallerAccount<'a> { + pub lamports: &'a mut u64, + pub owner: &'a mut Pubkey, + // The original data length of the account at the start of the current + // instruction. We use this to determine wether an account was shrunk or + // grown before or after CPI, and to derive the vm address of the realloc + // region. + pub original_data_len: usize, + // This points to the data section for this account, as serialized and + // mapped inside the vm (see serialize_parameters() in + // BpfExecutor::execute). + // + // This is only set when account_data_direct_mapping is off. + pub serialized_data: &'a mut [u8], + // Given the corresponding input AccountInfo::data, vm_data_addr points to + // the pointer field and ref_to_len_in_vm points to the length field. + pub vm_data_addr: u64, + pub ref_to_len_in_vm: &'a mut u64, +} + +impl<'a> CallerAccount<'a> { + pub fn get_serialized_data( + memory_mapping: &solana_sbpf::memory_region::MemoryMapping<'_>, + vm_addr: u64, + len: u64, + stricter_abi_and_runtime_constraints: bool, + account_data_direct_mapping: bool, + ) -> Result<&'a mut [u8], Error> { + use crate::memory::translate_slice_mut_for_cpi; + + if stricter_abi_and_runtime_constraints && account_data_direct_mapping { + Ok(&mut []) + } else if stricter_abi_and_runtime_constraints { + // Workaround the memory permissions (as these are from the PoV of being inside the VM) + let serialization_ptr = translate_slice_mut_for_cpi::( + memory_mapping, + solana_sbpf::ebpf::MM_INPUT_START, + 1, + false, // Don't care since it is byte aligned + )? 
+ .as_mut_ptr(); + unsafe { + Ok(std::slice::from_raw_parts_mut( + serialization_ptr + .add(vm_addr.saturating_sub(solana_sbpf::ebpf::MM_INPUT_START) as usize), + len as usize, + )) + } + } else { + translate_slice_mut_for_cpi::( + memory_mapping, + vm_addr, + len, + false, // Don't care since it is byte aligned + ) + } + } + + // Create a CallerAccount given an AccountInfo. + pub fn from_account_info( + invoke_context: &InvokeContext, + memory_mapping: &solana_sbpf::memory_region::MemoryMapping<'_>, + check_aligned: bool, + _vm_addr: u64, + account_info: &solana_account_info::AccountInfo, + account_metadata: &crate::invoke_context::SerializedAccountMetadata, + ) -> Result, Error> { + use crate::memory::{translate_type, translate_type_mut_for_cpi}; + + let stricter_abi_and_runtime_constraints = invoke_context + .get_feature_set() + .stricter_abi_and_runtime_constraints; + let account_data_direct_mapping = + invoke_context.get_feature_set().account_data_direct_mapping; + + if stricter_abi_and_runtime_constraints { + check_account_info_pointer( + invoke_context, + account_info.key as *const _ as u64, + account_metadata.vm_key_addr, + "key", + )?; + check_account_info_pointer( + invoke_context, + account_info.owner as *const _ as u64, + account_metadata.vm_owner_addr, + "owner", + )?; + } + + // account_info points to host memory. The addresses used internally are + // in vm space so they need to be translated. + let lamports = { + // Double translate lamports out of RefCell + let ptr = translate_type::( + memory_mapping, + account_info.lamports.as_ptr() as u64, + check_aligned, + )?; + if stricter_abi_and_runtime_constraints { + if account_info.lamports.as_ptr() as u64 >= solana_sbpf::ebpf::MM_INPUT_START { + return Err(Box::new(CpiError::InvalidPointer)); + } + + check_account_info_pointer( + invoke_context, + *ptr, + account_metadata.vm_lamports_addr, + "lamports", + )?; + } + translate_type_mut_for_cpi::(memory_mapping, *ptr, check_aligned)? 
+ }; + + let owner = translate_type_mut_for_cpi::( + memory_mapping, + account_info.owner as *const _ as u64, + check_aligned, + )?; + + let (serialized_data, vm_data_addr, ref_to_len_in_vm) = { + if stricter_abi_and_runtime_constraints + && account_info.data.as_ptr() as u64 >= solana_sbpf::ebpf::MM_INPUT_START + { + return Err(Box::new(CpiError::InvalidPointer)); + } + + // Double translate data out of RefCell + let data = *translate_type::<&[u8]>( + memory_mapping, + account_info.data.as_ptr() as *const _ as u64, + check_aligned, + )?; + if stricter_abi_and_runtime_constraints { + check_account_info_pointer( + invoke_context, + data.as_ptr() as u64, + account_metadata.vm_data_addr, + "data", + )?; + } + + invoke_context.consume_checked( + (data.len() as u64) + .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) + .unwrap_or(u64::MAX), + )?; + + let vm_len_addr = (account_info.data.as_ptr() as *const u64 as u64) + .saturating_add(std::mem::size_of::() as u64); + if stricter_abi_and_runtime_constraints { + // In the same vein as the other check_account_info_pointer() checks, we don't lock + // this pointer to a specific address but we don't want it to be inside accounts, or + // callees might be able to write to the pointed memory. + if vm_len_addr >= solana_sbpf::ebpf::MM_INPUT_START { + return Err(Box::new(CpiError::InvalidPointer)); + } + } + let ref_to_len_in_vm = + translate_type_mut_for_cpi::(memory_mapping, vm_len_addr, false)?; + let vm_data_addr = data.as_ptr() as u64; + let serialized_data = CallerAccount::get_serialized_data( + memory_mapping, + vm_data_addr, + data.len() as u64, + stricter_abi_and_runtime_constraints, + account_data_direct_mapping, + )?; + (serialized_data, vm_data_addr, ref_to_len_in_vm) + }; + + Ok(CallerAccount { + lamports, + owner, + original_data_len: account_metadata.original_data_len, + serialized_data, + vm_data_addr, + ref_to_len_in_vm, + }) + } + + // Create a CallerAccount given a SolAccountInfo. 
+ fn from_sol_account_info( + invoke_context: &InvokeContext, + memory_mapping: &solana_sbpf::memory_region::MemoryMapping<'_>, + check_aligned: bool, + vm_addr: u64, + account_info: &SolAccountInfo, + account_metadata: &crate::invoke_context::SerializedAccountMetadata, + ) -> Result, Error> { + use crate::memory::translate_type_mut_for_cpi; + + let stricter_abi_and_runtime_constraints = invoke_context + .get_feature_set() + .stricter_abi_and_runtime_constraints; + let account_data_direct_mapping = + invoke_context.get_feature_set().account_data_direct_mapping; + + if stricter_abi_and_runtime_constraints { + check_account_info_pointer( + invoke_context, + account_info.key_addr, + account_metadata.vm_key_addr, + "key", + )?; + + check_account_info_pointer( + invoke_context, + account_info.owner_addr, + account_metadata.vm_owner_addr, + "owner", + )?; + + check_account_info_pointer( + invoke_context, + account_info.lamports_addr, + account_metadata.vm_lamports_addr, + "lamports", + )?; + + check_account_info_pointer( + invoke_context, + account_info.data_addr, + account_metadata.vm_data_addr, + "data", + )?; + } + + // account_info points to host memory. The addresses used internally are + // in vm space so they need to be translated. + let lamports = translate_type_mut_for_cpi::( + memory_mapping, + account_info.lamports_addr, + check_aligned, + )?; + let owner = translate_type_mut_for_cpi::( + memory_mapping, + account_info.owner_addr, + check_aligned, + )?; + + invoke_context.consume_checked( + account_info + .data_len + .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) + .unwrap_or(u64::MAX), + )?; + + let serialized_data = CallerAccount::get_serialized_data( + memory_mapping, + account_info.data_addr, + account_info.data_len, + stricter_abi_and_runtime_constraints, + account_data_direct_mapping, + )?; + + // we already have the host addr we want: &mut account_info.data_len. 
+ // The account info might be read only in the vm though, so we translate + // to ensure we can write. This is tested by programs/sbf/rust/ro_modify + // which puts SolAccountInfo in rodata. + let vm_len_addr = vm_addr + .saturating_add(&account_info.data_len as *const u64 as u64) + .saturating_sub(account_info as *const _ as *const u64 as u64); + let ref_to_len_in_vm = + translate_type_mut_for_cpi::(memory_mapping, vm_len_addr, false)?; + + Ok(CallerAccount { + lamports, + owner, + original_data_len: account_metadata.original_data_len, + serialized_data, + vm_data_addr: account_info.data_addr, + ref_to_len_in_vm, + }) + } +} + +/// Implemented by language specific data structure translators +pub trait SyscallInvokeSigned { + fn translate_instruction( + addr: u64, + memory_mapping: &MemoryMapping, + invoke_context: &mut InvokeContext, + check_aligned: bool, + ) -> Result; + fn translate_accounts<'a>( + account_infos_addr: u64, + account_infos_len: u64, + memory_mapping: &MemoryMapping<'_>, + invoke_context: &mut InvokeContext, + check_aligned: bool, + ) -> Result>, Error>; + fn translate_signers( + program_id: &Pubkey, + signers_seeds_addr: u64, + signers_seeds_len: u64, + memory_mapping: &MemoryMapping, + check_aligned: bool, + ) -> Result, Error>; +} + +pub fn translate_instruction_rust( + addr: u64, + memory_mapping: &MemoryMapping, + invoke_context: &mut InvokeContext, + check_aligned: bool, +) -> Result { + let ix = translate_type::(memory_mapping, addr, check_aligned)?; + let account_metas = translate_slice::( + memory_mapping, + ix.accounts.as_vaddr(), + ix.accounts.len(), + check_aligned, + )?; + let data = translate_slice::( + memory_mapping, + ix.data.as_vaddr(), + ix.data.len(), + check_aligned, + )? 
+ .to_vec(); + + check_instruction_size(account_metas.len(), data.len())?; + + consume_compute_meter( + invoke_context, + (data.len() as u64) + .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) + .unwrap_or(u64::MAX), + )?; + + let mut accounts = Vec::with_capacity(account_metas.len()); + #[allow(clippy::needless_range_loop)] + for account_index in 0..account_metas.len() { + #[allow(clippy::indexing_slicing)] + let account_meta = &account_metas[account_index]; + if unsafe { + std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 + || std::ptr::read_volatile(&account_meta.is_writable as *const _ as *const u8) > 1 + } { + return Err(Box::new(InstructionError::InvalidArgument)); + } + accounts.push(account_meta.clone()); + } + + Ok(Instruction { + accounts, + data, + program_id: ix.program_id, + }) +} + +pub fn translate_accounts_rust<'a>( + account_infos_addr: u64, + account_infos_len: u64, + memory_mapping: &MemoryMapping<'_>, + invoke_context: &mut InvokeContext, + check_aligned: bool, +) -> Result>, Error> { + let (account_infos, account_info_keys) = translate_account_infos( + account_infos_addr, + account_infos_len, + |account_info: &AccountInfo| account_info.key as *const _ as u64, + memory_mapping, + invoke_context, + check_aligned, + )?; + + translate_and_update_accounts( + &account_info_keys, + account_infos, + account_infos_addr, + invoke_context, + memory_mapping, + check_aligned, + CallerAccount::from_account_info, + ) +} + +pub fn translate_signers_rust( + program_id: &Pubkey, + signers_seeds_addr: u64, + signers_seeds_len: u64, + memory_mapping: &MemoryMapping, + check_aligned: bool, +) -> Result, Error> { + let mut signers = Vec::new(); + if signers_seeds_len > 0 { + let signers_seeds = translate_slice::>>( + memory_mapping, + signers_seeds_addr, + signers_seeds_len, + check_aligned, + )?; + if signers_seeds.len() > MAX_SIGNERS { + return Err(Box::new(CpiError::TooManySigners)); + } + for signer_seeds in 
signers_seeds.iter() { + let untranslated_seeds = translate_slice::>( + memory_mapping, + signer_seeds.ptr(), + signer_seeds.len(), + check_aligned, + )?; + if untranslated_seeds.len() > MAX_SEEDS { + return Err(Box::new(InstructionError::MaxSeedLengthExceeded)); + } + let seeds = untranslated_seeds + .iter() + .map(|untranslated_seed| { + translate_vm_slice(untranslated_seed, memory_mapping, check_aligned) + }) + .collect::, Error>>()?; + let signer = + Pubkey::create_program_address(&seeds, program_id).map_err(CpiError::BadSeeds)?; + signers.push(signer); + } + Ok(signers) + } else { + Ok(vec![]) + } +} + +pub fn translate_instruction_c( + addr: u64, + memory_mapping: &MemoryMapping, + invoke_context: &mut InvokeContext, + check_aligned: bool, +) -> Result { + let ix_c = translate_type::(memory_mapping, addr, check_aligned)?; + + let program_id = translate_type::(memory_mapping, ix_c.program_id_addr, check_aligned)?; + let account_metas = translate_slice::( + memory_mapping, + ix_c.accounts_addr, + ix_c.accounts_len, + check_aligned, + )?; + let data = translate_slice::(memory_mapping, ix_c.data_addr, ix_c.data_len, check_aligned)? 
+ .to_vec(); + + check_instruction_size(ix_c.accounts_len as usize, data.len())?; + + consume_compute_meter( + invoke_context, + (data.len() as u64) + .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) + .unwrap_or(u64::MAX), + )?; + + let mut accounts = Vec::with_capacity(ix_c.accounts_len as usize); + #[allow(clippy::needless_range_loop)] + for account_index in 0..ix_c.accounts_len as usize { + #[allow(clippy::indexing_slicing)] + let account_meta = &account_metas[account_index]; + if unsafe { + std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 + || std::ptr::read_volatile(&account_meta.is_writable as *const _ as *const u8) > 1 + } { + return Err(Box::new(InstructionError::InvalidArgument)); + } + let pubkey = + translate_type::(memory_mapping, account_meta.pubkey_addr, check_aligned)?; + accounts.push(AccountMeta { + pubkey: *pubkey, + is_signer: account_meta.is_signer, + is_writable: account_meta.is_writable, + }); + } + + Ok(Instruction { + accounts, + data, + program_id: *program_id, + }) +} + +pub fn translate_accounts_c<'a>( + account_infos_addr: u64, + account_infos_len: u64, + memory_mapping: &MemoryMapping<'_>, + invoke_context: &mut InvokeContext, + check_aligned: bool, +) -> Result>, Error> { + let (account_infos, account_info_keys) = translate_account_infos( + account_infos_addr, + account_infos_len, + |account_info: &SolAccountInfo| account_info.key_addr, + memory_mapping, + invoke_context, + check_aligned, + )?; + + translate_and_update_accounts( + &account_info_keys, + account_infos, + account_infos_addr, + invoke_context, + memory_mapping, + check_aligned, + CallerAccount::from_sol_account_info, + ) +} + +pub fn translate_signers_c( + program_id: &Pubkey, + signers_seeds_addr: u64, + signers_seeds_len: u64, + memory_mapping: &MemoryMapping, + check_aligned: bool, +) -> Result, Error> { + if signers_seeds_len > 0 { + let signers_seeds = translate_slice::( + memory_mapping, + signers_seeds_addr, + 
signers_seeds_len, + check_aligned, + )?; + if signers_seeds.len() > MAX_SIGNERS { + return Err(Box::new(CpiError::TooManySigners)); + } + Ok(signers_seeds + .iter() + .map(|signer_seeds| { + let seeds = translate_slice::( + memory_mapping, + signer_seeds.addr, + signer_seeds.len, + check_aligned, + )?; + if seeds.len() > MAX_SEEDS { + return Err(Box::new(InstructionError::MaxSeedLengthExceeded) as Error); + } + let seeds_bytes = seeds + .iter() + .map(|seed| { + translate_slice::(memory_mapping, seed.addr, seed.len, check_aligned) + }) + .collect::, Error>>()?; + Pubkey::create_program_address(&seeds_bytes, program_id) + .map_err(|err| Box::new(CpiError::BadSeeds(err)) as Error) + }) + .collect::, Error>>()?) + } else { + Ok(vec![]) + } +} + +/// Call process instruction, common to both Rust and C +pub fn cpi_common( + invoke_context: &mut InvokeContext, + instruction_addr: u64, + account_infos_addr: u64, + account_infos_len: u64, + signers_seeds_addr: u64, + signers_seeds_len: u64, + memory_mapping: &mut MemoryMapping, +) -> Result { + let check_aligned = invoke_context.get_check_aligned(); + + // CPI entry. + // + // Translate the inputs to the syscall and synchronize the caller's account + // changes so the callee can see them. 
+ consume_compute_meter( + invoke_context, + invoke_context.get_execution_cost().invoke_units, + )?; + if let Some(execute_time) = invoke_context.execute_time.as_mut() { + execute_time.stop(); + invoke_context.timings.execute_us += execute_time.as_us(); + } + + let instruction = S::translate_instruction( + instruction_addr, + memory_mapping, + invoke_context, + check_aligned, + )?; + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + let caller_program_id = instruction_context.get_program_key()?; + let signers = S::translate_signers( + caller_program_id, + signers_seeds_addr, + signers_seeds_len, + memory_mapping, + check_aligned, + )?; + check_authorized_program(&instruction.program_id, &instruction.data, invoke_context)?; + invoke_context.prepare_next_instruction(&instruction, &signers)?; + + let mut accounts = S::translate_accounts( + account_infos_addr, + account_infos_len, + memory_mapping, + invoke_context, + check_aligned, + )?; + + // Process the callee instruction + let mut compute_units_consumed = 0; + invoke_context + .process_instruction(&mut compute_units_consumed, &mut ExecuteTimings::default())?; + + // re-bind to please the borrow checker + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + + // CPI exit. + // + // Synchronize the callee's account changes so the caller can see them. 
+ let stricter_abi_and_runtime_constraints = invoke_context + .get_feature_set() + .stricter_abi_and_runtime_constraints; + let account_data_direct_mapping = invoke_context.get_feature_set().account_data_direct_mapping; + + for translate_account in accounts.iter_mut() { + let mut callee_account = instruction_context + .try_borrow_instruction_account(translate_account.index_in_caller)?; + if translate_account.update_caller_account_info { + update_caller_account( + invoke_context, + memory_mapping, + check_aligned, + &mut translate_account.caller_account, + &mut callee_account, + stricter_abi_and_runtime_constraints, + account_data_direct_mapping, + )?; + } + } + + if stricter_abi_and_runtime_constraints { + for translate_account in accounts.iter() { + let mut callee_account = instruction_context + .try_borrow_instruction_account(translate_account.index_in_caller)?; + if translate_account.update_caller_account_region { + update_caller_account_region( + memory_mapping, + check_aligned, + &translate_account.caller_account, + &mut callee_account, + account_data_direct_mapping, + )?; + } + } + } + + invoke_context.execute_time = Some(Measure::start("execute")); + Ok(SUCCESS) +} + +/// Account data and metadata that has been translated from caller space. 
+pub struct TranslatedAccount<'a> { + pub index_in_caller: IndexOfAccount, + pub caller_account: CallerAccount<'a>, + pub update_caller_account_region: bool, + pub update_caller_account_info: bool, +} + +fn translate_account_infos<'a, T, F>( + account_infos_addr: u64, + account_infos_len: u64, + key_addr: F, + memory_mapping: &'a MemoryMapping, + invoke_context: &mut InvokeContext, + check_aligned: bool, +) -> Result<(&'a [T], Vec<&'a Pubkey>), Error> +where + F: Fn(&T) -> u64, +{ + let stricter_abi_and_runtime_constraints = invoke_context + .get_feature_set() + .stricter_abi_and_runtime_constraints; + + // In the same vein as the other check_account_info_pointer() checks, we don't lock + // this pointer to a specific address but we don't want it to be inside accounts, or + // callees might be able to write to the pointed memory. + if stricter_abi_and_runtime_constraints + && account_infos_addr + .saturating_add(account_infos_len.saturating_mul(std::mem::size_of::() as u64)) + >= ebpf::MM_INPUT_START + { + return Err(CpiError::InvalidPointer.into()); + } + + let account_infos = translate_slice::( + memory_mapping, + account_infos_addr, + account_infos_len, + check_aligned, + )?; + check_account_infos(account_infos.len(), invoke_context)?; + let mut account_info_keys = Vec::with_capacity(account_infos_len as usize); + #[allow(clippy::needless_range_loop)] + for account_index in 0..account_infos_len as usize { + #[allow(clippy::indexing_slicing)] + let account_info = &account_infos[account_index]; + account_info_keys.push(translate_type::( + memory_mapping, + key_addr(account_info), + check_aligned, + )?); + } + Ok((account_infos, account_info_keys)) +} + +// Finish translating accounts, build CallerAccount values and update callee +// accounts in preparation of executing the callee. 
+fn translate_and_update_accounts<'a, T, F>( + account_info_keys: &[&Pubkey], + account_infos: &[T], + account_infos_addr: u64, + invoke_context: &mut InvokeContext, + memory_mapping: &MemoryMapping<'_>, + check_aligned: bool, + do_translate: F, +) -> Result>, Error> +where + F: Fn( + &InvokeContext, + &MemoryMapping<'_>, + bool, + u64, + &T, + &SerializedAccountMetadata, + ) -> Result, Error>, +{ + let transaction_context = &invoke_context.transaction_context; + let next_instruction_context = transaction_context.get_next_instruction_context()?; + let next_instruction_accounts = next_instruction_context.instruction_accounts(); + let instruction_context = transaction_context.get_current_instruction_context()?; + let mut accounts = Vec::with_capacity(next_instruction_accounts.len()); + + // unwrapping here is fine: we're in a syscall and the method below fails + // only outside syscalls + let accounts_metadata = &invoke_context + .get_syscall_context() + .unwrap() + .accounts_metadata; + + let stricter_abi_and_runtime_constraints = invoke_context + .get_feature_set() + .stricter_abi_and_runtime_constraints; + let account_data_direct_mapping = invoke_context.get_feature_set().account_data_direct_mapping; + + for (instruction_account_index, instruction_account) in + next_instruction_accounts.iter().enumerate() + { + if next_instruction_context + .is_instruction_account_duplicate(instruction_account_index as IndexOfAccount)? 
+ .is_some() + { + continue; // Skip duplicate account + } + + let index_in_caller = instruction_context + .get_index_of_account_in_instruction(instruction_account.index_in_transaction)?; + let callee_account = instruction_context.try_borrow_instruction_account(index_in_caller)?; + let account_key = invoke_context + .transaction_context + .get_key_of_account_at_index(instruction_account.index_in_transaction)?; + + #[allow(deprecated)] + if callee_account.is_executable() { + // Use the known account + consume_compute_meter( + invoke_context, + (callee_account.get_data().len() as u64) + .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) + .unwrap_or(u64::MAX), + )?; + } else if let Some(caller_account_index) = + account_info_keys.iter().position(|key| *key == account_key) + { + let serialized_metadata = + accounts_metadata + .get(index_in_caller as usize) + .ok_or_else(|| { + ic_msg!( + invoke_context, + "Internal error: index mismatch for account {}", + account_key + ); + Box::new(InstructionError::MissingAccount) + })?; + + // build the CallerAccount corresponding to this account. + if caller_account_index >= account_infos.len() { + return Err(Box::new(CpiError::InvalidLength)); + } + #[allow(clippy::indexing_slicing)] + let caller_account = + do_translate( + invoke_context, + memory_mapping, + check_aligned, + account_infos_addr.saturating_add( + caller_account_index.saturating_mul(mem::size_of::()) as u64, + ), + &account_infos[caller_account_index], + serialized_metadata, + )?; + + // before initiating CPI, the caller may have modified the + // account (caller_account). We need to update the corresponding + // BorrowedAccount (callee_account) so the callee can see the + // changes. 
+ let update_caller = update_callee_account( + check_aligned, + &caller_account, + callee_account, + stricter_abi_and_runtime_constraints, + account_data_direct_mapping, + )?; + + accounts.push(TranslatedAccount { + index_in_caller, + caller_account, + update_caller_account_region: instruction_account.is_writable() || update_caller, + update_caller_account_info: instruction_account.is_writable(), + }); + } else { + ic_msg!( + invoke_context, + "Instruction references an unknown account {}", + account_key + ); + return Err(Box::new(InstructionError::MissingAccount)); + } + } + + Ok(accounts) +} + +fn consume_compute_meter(invoke_context: &InvokeContext, amount: u64) -> Result<(), Error> { + invoke_context.consume_checked(amount)?; + Ok(()) +} + +// Update the given account before executing CPI. +// +// caller_account and callee_account describe the same account. At CPI entry +// caller_account might include changes the caller has made to the account +// before executing CPI. +// +// This method updates callee_account so the CPI callee can see the caller's +// changes. +// +// When true is returned, the caller account must be updated after CPI. This +// is only set for stricter_abi_and_runtime_constraints when the pointer may have changed. 
+fn update_callee_account( + check_aligned: bool, + caller_account: &CallerAccount, + mut callee_account: BorrowedInstructionAccount<'_>, + stricter_abi_and_runtime_constraints: bool, + account_data_direct_mapping: bool, +) -> Result { + let mut must_update_caller = false; + + if callee_account.get_lamports() != *caller_account.lamports { + callee_account.set_lamports(*caller_account.lamports)?; + } + + if stricter_abi_and_runtime_constraints { + let prev_len = callee_account.get_data().len(); + let post_len = *caller_account.ref_to_len_in_vm as usize; + if prev_len != post_len { + let is_caller_loader_deprecated = !check_aligned; + let address_space_reserved_for_account = if is_caller_loader_deprecated { + caller_account.original_data_len + } else { + caller_account + .original_data_len + .saturating_add(MAX_PERMITTED_DATA_INCREASE) + }; + if post_len > address_space_reserved_for_account { + return Err(InstructionError::InvalidRealloc.into()); + } + callee_account.set_data_length(post_len)?; + // pointer to data may have changed, so caller must be updated + must_update_caller = true; + } + if !account_data_direct_mapping && callee_account.can_data_be_changed().is_ok() { + callee_account.set_data_from_slice(caller_account.serialized_data)?; + } + } else { + // The redundant check helps to avoid the expensive data comparison if we can + match callee_account.can_data_be_resized(caller_account.serialized_data.len()) { + Ok(()) => callee_account.set_data_from_slice(caller_account.serialized_data)?, + Err(err) if callee_account.get_data() != caller_account.serialized_data => { + return Err(Box::new(err)); + } + _ => {} + } + } + + // Change the owner at the end so that we are allowed to change the lamports and data before + if callee_account.get_owner() != caller_account.owner { + callee_account.set_owner(caller_account.owner.as_ref())?; + // caller gave ownership and thus write access away, so caller must be updated + must_update_caller = true; + } + + 
Ok(must_update_caller) +} + +fn update_caller_account_region( + memory_mapping: &mut MemoryMapping, + check_aligned: bool, + caller_account: &CallerAccount, + callee_account: &mut BorrowedInstructionAccount<'_>, + account_data_direct_mapping: bool, +) -> Result<(), Error> { + let is_caller_loader_deprecated = !check_aligned; + let address_space_reserved_for_account = if is_caller_loader_deprecated { + caller_account.original_data_len + } else { + caller_account + .original_data_len + .saturating_add(MAX_PERMITTED_DATA_INCREASE) + }; + + if address_space_reserved_for_account > 0 { + // We can trust vm_data_addr to point to the correct region because we + // enforce that in CallerAccount::from_(sol_)account_info. + let (region_index, region) = memory_mapping + .find_region(caller_account.vm_data_addr) + .ok_or_else(|| Box::new(InstructionError::MissingAccount))?; + // vm_data_addr must always point to the beginning of the region + debug_assert_eq!(region.vm_addr, caller_account.vm_data_addr); + let mut new_region; + if !account_data_direct_mapping { + new_region = region.clone(); + modify_memory_region_of_account(callee_account, &mut new_region); + } else { + new_region = create_memory_region_of_account(callee_account, region.vm_addr)?; + } + memory_mapping.replace_region(region_index, new_region)?; + } + + Ok(()) +} + +// Update the given account after executing CPI. +// +// caller_account and callee_account describe to the same account. At CPI exit +// callee_account might include changes the callee has made to the account +// after executing. +// +// This method updates caller_account so the CPI caller can see the callee's +// changes. +// +// Safety: Once `stricter_abi_and_runtime_constraints` is enabled all fields of [CallerAccount] used +// in this function should never point inside the address space reserved for +// accounts (regardless of the current size of an account). 
+fn update_caller_account( + invoke_context: &InvokeContext, + memory_mapping: &MemoryMapping<'_>, + check_aligned: bool, + caller_account: &mut CallerAccount<'_>, + callee_account: &mut BorrowedInstructionAccount<'_>, + stricter_abi_and_runtime_constraints: bool, + account_data_direct_mapping: bool, +) -> Result<(), Error> { + *caller_account.lamports = callee_account.get_lamports(); + *caller_account.owner = *callee_account.get_owner(); + + let prev_len = *caller_account.ref_to_len_in_vm as usize; + let post_len = callee_account.get_data().len(); + let is_caller_loader_deprecated = !check_aligned; + let address_space_reserved_for_account = + if stricter_abi_and_runtime_constraints && is_caller_loader_deprecated { + caller_account.original_data_len + } else { + caller_account + .original_data_len + .saturating_add(MAX_PERMITTED_DATA_INCREASE) + }; + + if post_len > address_space_reserved_for_account + && (stricter_abi_and_runtime_constraints || prev_len != post_len) + { + let max_increase = + address_space_reserved_for_account.saturating_sub(caller_account.original_data_len); + ic_msg!( + invoke_context, + "Account data size realloc limited to {max_increase} in inner instructions", + ); + return Err(Box::new(InstructionError::InvalidRealloc)); + } + + if prev_len != post_len { + // when stricter_abi_and_runtime_constraints is enabled we don't cache the serialized data in + // caller_account.serialized_data. See CallerAccount::from_account_info. + if !(stricter_abi_and_runtime_constraints && account_data_direct_mapping) { + // If the account has been shrunk, we're going to zero the unused memory + // *that was previously used*. + if post_len < prev_len { + caller_account + .serialized_data + .get_mut(post_len..) + .ok_or_else(|| Box::new(InstructionError::AccountDataTooSmall))? + .fill(0); + } + // Set the length of caller_account.serialized_data to post_len. 
+ caller_account.serialized_data = CallerAccount::get_serialized_data( + memory_mapping, + caller_account.vm_data_addr, + post_len as u64, + stricter_abi_and_runtime_constraints, + account_data_direct_mapping, + )?; + } + // this is the len field in the AccountInfo::data slice + *caller_account.ref_to_len_in_vm = post_len as u64; + + // this is the len field in the serialized parameters + let serialized_len_ptr = translate_type_mut_for_cpi::( + memory_mapping, + caller_account + .vm_data_addr + .saturating_sub(std::mem::size_of::() as u64), + check_aligned, + )?; + *serialized_len_ptr = post_len as u64; + } + + if !(stricter_abi_and_runtime_constraints && account_data_direct_mapping) { + // Propagate changes in the callee up to the caller. + let to_slice = &mut caller_account.serialized_data; + let from_slice = callee_account + .get_data() + .get(0..post_len) + .ok_or(CpiError::InvalidLength)?; + if to_slice.len() != from_slice.len() { + return Err(Box::new(InstructionError::AccountDataTooSmall)); + } + to_slice.copy_from_slice(from_slice); + } + + Ok(()) +} + +#[allow(clippy::indexing_slicing)] +#[allow(clippy::arithmetic_side_effects)] +#[cfg(test)] +mod tests { + use { + super::*, + crate::{ + invoke_context::{BpfAllocator, SerializedAccountMetadata, SyscallContext}, + memory::translate_type, + with_mock_invoke_context_with_feature_set, + }, + assert_matches::assert_matches, + solana_account::{Account, AccountSharedData, ReadableAccount}, + solana_account_info::AccountInfo, + solana_sbpf::{ + ebpf::MM_INPUT_START, memory_region::MemoryRegion, program::SBPFVersion, vm::Config, + }, + solana_sdk_ids::{bpf_loader, system_program}, + solana_svm_feature_set::SVMFeatureSet, + solana_transaction_context::{ + transaction_accounts::TransactionAccount, IndexOfAccount, InstructionAccount, + }, + std::{ + cell::{Cell, RefCell}, + mem, ptr, + rc::Rc, + slice, + }, + test_case::test_matrix, + }; + + macro_rules! 
mock_invoke_context { + ($invoke_context:ident, + $transaction_context:ident, + $instruction_data:expr, + $transaction_accounts:expr, + $program_account:expr, + $instruction_accounts:expr) => { + let instruction_data = $instruction_data; + let instruction_accounts = $instruction_accounts + .iter() + .map(|index_in_transaction| { + InstructionAccount::new( + *index_in_transaction as IndexOfAccount, + false, + $transaction_accounts[*index_in_transaction as usize].2, + ) + }) + .collect::>(); + let transaction_accounts = $transaction_accounts + .into_iter() + .map(|a| (a.0, a.1)) + .collect::>(); + let mut feature_set = SVMFeatureSet::all_enabled(); + feature_set.stricter_abi_and_runtime_constraints = false; + let feature_set = &feature_set; + with_mock_invoke_context_with_feature_set!( + $invoke_context, + $transaction_context, + feature_set, + transaction_accounts + ); + $invoke_context + .transaction_context + .configure_next_instruction_for_tests( + $program_account, + instruction_accounts, + instruction_data, + ) + .unwrap(); + $invoke_context.push().unwrap(); + }; + } + + macro_rules! 
borrow_instruction_account { + ($borrowed_account:ident, $invoke_context:expr, $index:expr) => { + let instruction_context = $invoke_context + .transaction_context + .get_current_instruction_context() + .unwrap(); + let $borrowed_account = instruction_context + .try_borrow_instruction_account($index) + .unwrap(); + }; + } + + fn is_zeroed(data: &[u8]) -> bool { + data.iter().all(|b| *b == 0) + } + + struct MockCallerAccount { + lamports: u64, + owner: Pubkey, + vm_addr: u64, + data: Vec, + len: u64, + regions: Vec, + stricter_abi_and_runtime_constraints: bool, + } + + impl MockCallerAccount { + fn new( + lamports: u64, + owner: Pubkey, + data: &[u8], + stricter_abi_and_runtime_constraints: bool, + ) -> MockCallerAccount { + let vm_addr = MM_INPUT_START; + let mut region_addr = vm_addr; + let region_len = mem::size_of::() + + if stricter_abi_and_runtime_constraints { + 0 + } else { + data.len() + MAX_PERMITTED_DATA_INCREASE + }; + let mut d = vec![0; region_len]; + let mut regions = vec![]; + + // always write the [len] part even when stricter_abi_and_runtime_constraints + unsafe { ptr::write_unaligned::(d.as_mut_ptr().cast(), data.len() as u64) }; + + // write the account data when not stricter_abi_and_runtime_constraints + if !stricter_abi_and_runtime_constraints { + d[mem::size_of::()..][..data.len()].copy_from_slice(data); + } + + // create a region for [len][data+realloc if !stricter_abi_and_runtime_constraints] + regions.push(MemoryRegion::new_writable(&mut d[..region_len], vm_addr)); + region_addr += region_len as u64; + + if stricter_abi_and_runtime_constraints { + // create a region for the directly mapped data + regions.push(MemoryRegion::new_readonly(data, region_addr)); + region_addr += data.len() as u64; + + // create a region for the realloc padding + regions.push(MemoryRegion::new_writable( + &mut d[mem::size_of::()..], + region_addr, + )); + } else { + // caller_account.serialized_data must have the actual data length + d.truncate(mem::size_of::() + 
data.len()); + } + + MockCallerAccount { + lamports, + owner, + vm_addr, + data: d, + len: data.len() as u64, + regions, + stricter_abi_and_runtime_constraints, + } + } + + fn data_slice<'a>(&self) -> &'a [u8] { + // lifetime crimes + unsafe { + slice::from_raw_parts( + self.data[mem::size_of::()..].as_ptr(), + self.data.capacity() - mem::size_of::(), + ) + } + } + + fn caller_account(&mut self) -> CallerAccount { + let data = if self.stricter_abi_and_runtime_constraints { + &mut [] + } else { + &mut self.data[mem::size_of::()..] + }; + CallerAccount { + lamports: &mut self.lamports, + owner: &mut self.owner, + original_data_len: self.len as usize, + serialized_data: data, + vm_data_addr: self.vm_addr + mem::size_of::() as u64, + ref_to_len_in_vm: &mut self.len, + } + } + } + + struct MockAccountInfo<'a> { + key: Pubkey, + is_signer: bool, + is_writable: bool, + lamports: u64, + data: &'a [u8], + owner: Pubkey, + executable: bool, + _unused: u64, + } + + impl MockAccountInfo<'_> { + fn new(key: Pubkey, account: &AccountSharedData) -> MockAccountInfo { + MockAccountInfo { + key, + is_signer: false, + is_writable: false, + lamports: account.lamports(), + data: account.data(), + owner: *account.owner(), + executable: account.executable(), + _unused: account.rent_epoch(), + } + } + + fn into_region(self, vm_addr: u64) -> (Vec, MemoryRegion, SerializedAccountMetadata) { + let size = mem::size_of::() + + mem::size_of::() * 2 + + mem::size_of::>>() + + mem::size_of::() + + mem::size_of::>>() + + self.data.len(); + let mut data = vec![0; size]; + + let vm_addr = vm_addr as usize; + let key_addr = vm_addr + mem::size_of::(); + let lamports_cell_addr = key_addr + mem::size_of::(); + let lamports_addr = lamports_cell_addr + mem::size_of::>>(); + let owner_addr = lamports_addr + mem::size_of::(); + let data_cell_addr = owner_addr + mem::size_of::(); + let data_addr = data_cell_addr + mem::size_of::>>(); + + #[allow(deprecated)] + #[allow(clippy::used_underscore_binding)] + let 
info = AccountInfo { + key: unsafe { (key_addr as *const Pubkey).as_ref() }.unwrap(), + is_signer: self.is_signer, + is_writable: self.is_writable, + lamports: unsafe { + Rc::from_raw((lamports_cell_addr + RcBox::<&mut u64>::VALUE_OFFSET) as *const _) + }, + data: unsafe { + Rc::from_raw((data_cell_addr + RcBox::<&mut [u8]>::VALUE_OFFSET) as *const _) + }, + owner: unsafe { (owner_addr as *const Pubkey).as_ref() }.unwrap(), + executable: self.executable, + _unused: self._unused, + }; + + unsafe { + ptr::write_unaligned(data.as_mut_ptr().cast(), info); + ptr::write_unaligned( + (data.as_mut_ptr() as usize + key_addr - vm_addr) as *mut _, + self.key, + ); + ptr::write_unaligned( + (data.as_mut_ptr() as usize + lamports_cell_addr - vm_addr) as *mut _, + RcBox::new(RefCell::new((lamports_addr as *mut u64).as_mut().unwrap())), + ); + ptr::write_unaligned( + (data.as_mut_ptr() as usize + lamports_addr - vm_addr) as *mut _, + self.lamports, + ); + ptr::write_unaligned( + (data.as_mut_ptr() as usize + owner_addr - vm_addr) as *mut _, + self.owner, + ); + ptr::write_unaligned( + (data.as_mut_ptr() as usize + data_cell_addr - vm_addr) as *mut _, + RcBox::new(RefCell::new(slice::from_raw_parts_mut( + data_addr as *mut u8, + self.data.len(), + ))), + ); + data[data_addr - vm_addr..].copy_from_slice(self.data); + } + + let region = MemoryRegion::new_writable(data.as_mut_slice(), vm_addr as u64); + ( + data, + region, + SerializedAccountMetadata { + original_data_len: self.data.len(), + vm_key_addr: key_addr as u64, + vm_lamports_addr: lamports_addr as u64, + vm_owner_addr: owner_addr as u64, + vm_data_addr: data_addr as u64, + }, + ) + } + } + + struct MockInstruction { + program_id: Pubkey, + accounts: Vec, + data: Vec, + } + + impl MockInstruction { + fn into_region(self, vm_addr: u64) -> (Vec, MemoryRegion) { + let accounts_len = mem::size_of::() * self.accounts.len(); + + let size = mem::size_of::() + accounts_len + self.data.len(); + + let mut data = vec![0; size]; + + let 
vm_addr = vm_addr as usize; + let accounts_addr = vm_addr + mem::size_of::(); + let data_addr = accounts_addr + accounts_len; + + let ins = Instruction { + program_id: self.program_id, + accounts: unsafe { + Vec::from_raw_parts( + accounts_addr as *mut _, + self.accounts.len(), + self.accounts.len(), + ) + }, + data: unsafe { + Vec::from_raw_parts(data_addr as *mut _, self.data.len(), self.data.len()) + }, + }; + let ins = StableInstruction::from(ins); + + unsafe { + ptr::write_unaligned(data.as_mut_ptr().cast(), ins); + data[accounts_addr - vm_addr..][..accounts_len].copy_from_slice( + slice::from_raw_parts(self.accounts.as_ptr().cast(), accounts_len), + ); + data[data_addr - vm_addr..].copy_from_slice(&self.data); + } + + let region = MemoryRegion::new_writable(data.as_mut_slice(), vm_addr as u64); + (data, region) + } + } + + #[repr(C)] + struct RcBox { + strong: Cell, + weak: Cell, + value: T, + } + + impl RcBox { + const VALUE_OFFSET: usize = mem::size_of::>() * 2; + fn new(value: T) -> RcBox { + RcBox { + strong: Cell::new(0), + weak: Cell::new(0), + value, + } + } + } + + type TestTransactionAccount = (Pubkey, AccountSharedData, bool); + + fn transaction_with_one_writable_instruction_account( + data: Vec, + ) -> Vec { + let program_id = Pubkey::new_unique(); + let account = AccountSharedData::from(Account { + lamports: 1, + data, + owner: program_id, + executable: false, + rent_epoch: 100, + }); + vec![ + ( + program_id, + AccountSharedData::from(Account { + lamports: 0, + data: vec![], + owner: bpf_loader::id(), + executable: true, + rent_epoch: 0, + }), + false, + ), + (Pubkey::new_unique(), account, true), + ] + } + + fn transaction_with_one_readonly_instruction_account( + data: Vec, + ) -> Vec { + let program_id = Pubkey::new_unique(); + let account_owner = Pubkey::new_unique(); + let account = AccountSharedData::from(Account { + lamports: 1, + data, + owner: account_owner, + executable: false, + rent_epoch: 100, + }); + vec![ + ( + program_id, + 
AccountSharedData::from(Account { + lamports: 0, + data: vec![], + owner: bpf_loader::id(), + executable: true, + rent_epoch: 0, + }), + false, + ), + (Pubkey::new_unique(), account, true), + ] + } + + fn mock_signers(signers: &[&[u8]], vm_addr: u64) -> (Vec, MemoryRegion) { + let vm_addr = vm_addr as usize; + + // calculate size + let fat_ptr_size_of_slice = mem::size_of::<&[()]>(); // pointer size + length size + let singers_length = signers.len(); + let sum_signers_data_length: usize = signers.iter().map(|s| s.len()).sum(); + + // init data vec + let total_size = fat_ptr_size_of_slice + + singers_length * fat_ptr_size_of_slice + + sum_signers_data_length; + let mut data = vec![0; total_size]; + + // data is composed by 3 parts + // A. + // [ singers address, singers length, ..., + // B. | + // signer1 address, signer1 length, signer2 address ..., + // ^ p1 ---> + // C. | + // signer1 data, signer2 data, ... ] + // ^ p2 ---> + + // A. + data[..fat_ptr_size_of_slice / 2] + .clone_from_slice(&(fat_ptr_size_of_slice + vm_addr).to_le_bytes()); + data[fat_ptr_size_of_slice / 2..fat_ptr_size_of_slice] + .clone_from_slice(&(singers_length).to_le_bytes()); + + // B. + C. + let (mut p1, mut p2) = ( + fat_ptr_size_of_slice, + fat_ptr_size_of_slice + singers_length * fat_ptr_size_of_slice, + ); + for signer in signers.iter() { + let signer_length = signer.len(); + + // B. + data[p1..p1 + fat_ptr_size_of_slice / 2] + .clone_from_slice(&(p2 + vm_addr).to_le_bytes()); + data[p1 + fat_ptr_size_of_slice / 2..p1 + fat_ptr_size_of_slice] + .clone_from_slice(&(signer_length).to_le_bytes()); + p1 += fat_ptr_size_of_slice; + + // C. 
+ data[p2..p2 + signer_length].clone_from_slice(signer); + p2 += signer_length; + } + + let region = MemoryRegion::new_writable(data.as_mut_slice(), vm_addr as u64); + (data, region) + } + + #[test] + fn test_translate_instruction() { + let transaction_accounts = + transaction_with_one_writable_instruction_account(b"foo".to_vec()); + mock_invoke_context!( + invoke_context, + transaction_context, + b"instruction data", + transaction_accounts, + 0, + &[1] + ); + + let program_id = Pubkey::new_unique(); + let accounts = vec![AccountMeta { + pubkey: Pubkey::new_unique(), + is_signer: true, + is_writable: false, + }]; + let data = b"ins data".to_vec(); + let vm_addr = MM_INPUT_START; + let (_mem, region) = MockInstruction { + program_id, + accounts: accounts.clone(), + data: data.clone(), + } + .into_region(vm_addr); + + let config = Config { + aligned_memory_mapping: false, + ..Config::default() + }; + let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); + + let ins = translate_instruction_rust( + vm_addr, + &memory_mapping, + &mut invoke_context, + true, // check_aligned + ) + .unwrap(); + assert_eq!(ins.program_id, program_id); + assert_eq!(ins.accounts, accounts); + assert_eq!(ins.data, data); + } + + #[test] + fn test_translate_signers() { + let transaction_accounts = + transaction_with_one_writable_instruction_account(b"foo".to_vec()); + mock_invoke_context!( + invoke_context, + transaction_context, + b"instruction data", + transaction_accounts, + 0, + &[1] + ); + + let program_id = Pubkey::new_unique(); + let (derived_key, bump_seed) = Pubkey::find_program_address(&[b"foo"], &program_id); + + let vm_addr = MM_INPUT_START; + let (_mem, region) = mock_signers(&[b"foo", &[bump_seed]], vm_addr); + + let config = Config { + aligned_memory_mapping: false, + ..Config::default() + }; + let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); + + let signers = translate_signers_rust( + &program_id, + 
vm_addr, + 1, + &memory_mapping, + true, // check_aligned + ) + .unwrap(); + assert_eq!(signers[0], derived_key); + } + + #[test] + fn test_translate_accounts_rust() { + let transaction_accounts = + transaction_with_one_writable_instruction_account(b"foobar".to_vec()); + let account = transaction_accounts[1].1.clone(); + let key = transaction_accounts[1].0; + let original_data_len = account.data().len(); + + let vm_addr = MM_INPUT_START; + let (_mem, region, account_metadata) = + MockAccountInfo::new(key, &account).into_region(vm_addr); + + let config = Config { + aligned_memory_mapping: false, + ..Config::default() + }; + let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); + + mock_invoke_context!( + invoke_context, + transaction_context, + b"instruction data", + transaction_accounts, + 0, + &[1, 1] + ); + + invoke_context + .set_syscall_context(SyscallContext { + allocator: BpfAllocator::new(solana_program_entrypoint::HEAP_LENGTH as u64), + accounts_metadata: vec![account_metadata], + trace_log: Vec::new(), + }) + .unwrap(); + + invoke_context + .transaction_context + .configure_next_instruction_for_tests( + 0, + vec![ + InstructionAccount::new(1, false, true), + InstructionAccount::new(1, false, true), + ], + &[], + ) + .unwrap(); + let accounts = translate_accounts_rust( + vm_addr, + 1, + &memory_mapping, + &mut invoke_context, + true, // check_aligned + ) + .unwrap(); + assert_eq!(accounts.len(), 1); + let caller_account = &accounts[0].caller_account; + assert_eq!(caller_account.serialized_data, account.data()); + assert_eq!(caller_account.original_data_len, original_data_len); + } + + #[test] + fn test_caller_account_from_account_info() { + let transaction_accounts = + transaction_with_one_writable_instruction_account(b"foo".to_vec()); + let account = transaction_accounts[1].1.clone(); + mock_invoke_context!( + invoke_context, + transaction_context, + b"instruction data", + transaction_accounts, + 0, + &[1] + ); + + let 
key = Pubkey::new_unique(); + let vm_addr = MM_INPUT_START; + let (_mem, region, account_metadata) = + MockAccountInfo::new(key, &account).into_region(vm_addr); + + let config = Config { + aligned_memory_mapping: false, + ..Config::default() + }; + let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); + + let account_info = translate_type::(&memory_mapping, vm_addr, false).unwrap(); + + let caller_account = CallerAccount::from_account_info( + &invoke_context, + &memory_mapping, + true, // check_aligned + vm_addr, + account_info, + &account_metadata, + ) + .unwrap(); + assert_eq!(*caller_account.lamports, account.lamports()); + assert_eq!(caller_account.owner, account.owner()); + assert_eq!(caller_account.original_data_len, account.data().len()); + assert_eq!( + *caller_account.ref_to_len_in_vm as usize, + account.data().len() + ); + assert_eq!(caller_account.serialized_data, account.data()); + } + + #[test_matrix([false, true])] + fn test_update_caller_account_lamports_owner(stricter_abi_and_runtime_constraints: bool) { + let transaction_accounts = transaction_with_one_writable_instruction_account(vec![]); + let account = transaction_accounts[1].1.clone(); + mock_invoke_context!( + invoke_context, + transaction_context, + b"instruction data", + transaction_accounts, + 0, + &[1] + ); + + let mut mock_caller_account = + MockCallerAccount::new(1234, *account.owner(), account.data(), false); + + let config = Config { + aligned_memory_mapping: false, + ..Config::default() + }; + let memory_mapping = MemoryMapping::new( + mock_caller_account.regions.split_off(0), + &config, + SBPFVersion::V3, + ) + .unwrap(); + + let mut caller_account = mock_caller_account.caller_account(); + let instruction_context = invoke_context + .transaction_context + .get_current_instruction_context() + .unwrap(); + let mut callee_account = instruction_context + .try_borrow_instruction_account(0) + .unwrap(); + callee_account.set_lamports(42).unwrap(); + 
callee_account + .set_owner(Pubkey::new_unique().as_ref()) + .unwrap(); + + update_caller_account( + &invoke_context, + &memory_mapping, + true, // check_aligned + &mut caller_account, + &mut callee_account, + stricter_abi_and_runtime_constraints, + stricter_abi_and_runtime_constraints, + ) + .unwrap(); + + assert_eq!(*caller_account.lamports, 42); + assert_eq!(caller_account.owner, callee_account.get_owner()); + } + + #[test] + fn test_update_caller_account_data() { + let transaction_accounts = + transaction_with_one_writable_instruction_account(b"foobar".to_vec()); + let account = transaction_accounts[1].1.clone(); + let original_data_len = account.data().len(); + + mock_invoke_context!( + invoke_context, + transaction_context, + b"instruction data", + transaction_accounts, + 0, + &[1] + ); + + let mut mock_caller_account = + MockCallerAccount::new(account.lamports(), *account.owner(), account.data(), false); + + let config = Config { + aligned_memory_mapping: false, + ..Config::default() + }; + let memory_mapping = MemoryMapping::new( + mock_caller_account.regions.clone(), + &config, + SBPFVersion::V3, + ) + .unwrap(); + + let data_slice = mock_caller_account.data_slice(); + let len_ptr = unsafe { + data_slice + .as_ptr() + .offset(-(mem::size_of::() as isize)) + }; + let serialized_len = || unsafe { *len_ptr.cast::() as usize }; + let mut caller_account = mock_caller_account.caller_account(); + let instruction_context = invoke_context + .transaction_context + .get_current_instruction_context() + .unwrap(); + let mut callee_account = instruction_context + .try_borrow_instruction_account(0) + .unwrap(); + + for (new_value, expected_realloc_size) in [ + (b"foo".to_vec(), MAX_PERMITTED_DATA_INCREASE + 3), + (b"foobaz".to_vec(), MAX_PERMITTED_DATA_INCREASE), + (b"foobazbad".to_vec(), MAX_PERMITTED_DATA_INCREASE - 3), + ] { + assert_eq!(caller_account.serialized_data, callee_account.get_data()); + callee_account.set_data_from_slice(&new_value).unwrap(); + + 
update_caller_account( + &invoke_context, + &memory_mapping, + true, // check_aligned + &mut caller_account, + &mut callee_account, + false, + false, + ) + .unwrap(); + + let data_len = callee_account.get_data().len(); + assert_eq!(data_len, *caller_account.ref_to_len_in_vm as usize); + assert_eq!(data_len, serialized_len()); + assert_eq!(data_len, caller_account.serialized_data.len()); + assert_eq!( + callee_account.get_data(), + &caller_account.serialized_data[..data_len] + ); + assert_eq!(data_slice[data_len..].len(), expected_realloc_size); + assert!(is_zeroed(&data_slice[data_len..])); + } + + callee_account + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE) + .unwrap(); + update_caller_account( + &invoke_context, + &memory_mapping, + true, // check_aligned + &mut caller_account, + &mut callee_account, + false, + false, + ) + .unwrap(); + let data_len = callee_account.get_data().len(); + assert_eq!(data_slice[data_len..].len(), 0); + assert!(is_zeroed(&data_slice[data_len..])); + + callee_account + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE + 1) + .unwrap(); + assert_matches!( + update_caller_account( + &invoke_context, + &memory_mapping, + true, // check_aligned + &mut caller_account, + &mut callee_account, + false, + false, + ), + Err(error) if error.downcast_ref::().unwrap() == &InstructionError::InvalidRealloc + ); + + // close the account + callee_account.set_data_length(0).unwrap(); + callee_account + .set_owner(system_program::id().as_ref()) + .unwrap(); + update_caller_account( + &invoke_context, + &memory_mapping, + true, // check_aligned + &mut caller_account, + &mut callee_account, + false, + false, + ) + .unwrap(); + let data_len = callee_account.get_data().len(); + assert_eq!(data_len, 0); + } + + #[test_matrix([false, true])] + fn test_update_callee_account_lamports_owner(stricter_abi_and_runtime_constraints: bool) { + let transaction_accounts = transaction_with_one_writable_instruction_account(vec![]); + 
let account = transaction_accounts[1].1.clone(); + + mock_invoke_context!( + invoke_context, + transaction_context, + b"instruction data", + transaction_accounts, + 0, + &[1] + ); + + let mut mock_caller_account = + MockCallerAccount::new(1234, *account.owner(), account.data(), false); + + let caller_account = mock_caller_account.caller_account(); + + borrow_instruction_account!(callee_account, invoke_context, 0); + + *caller_account.lamports = 42; + *caller_account.owner = Pubkey::new_unique(); + + update_callee_account( + true, // check_aligned + &caller_account, + callee_account, + stricter_abi_and_runtime_constraints, + true, // account_data_direct_mapping + ) + .unwrap(); + + borrow_instruction_account!(callee_account, invoke_context, 0); + assert_eq!(callee_account.get_lamports(), 42); + assert_eq!(caller_account.owner, callee_account.get_owner()); + } + + #[test_matrix([false, true])] + fn test_update_callee_account_data_writable(stricter_abi_and_runtime_constraints: bool) { + let transaction_accounts = + transaction_with_one_writable_instruction_account(b"foobar".to_vec()); + let account = transaction_accounts[1].1.clone(); + + mock_invoke_context!( + invoke_context, + transaction_context, + b"instruction data", + transaction_accounts, + 0, + &[1] + ); + + let mut mock_caller_account = + MockCallerAccount::new(1234, *account.owner(), account.data(), false); + + let mut caller_account = mock_caller_account.caller_account(); + borrow_instruction_account!(callee_account, invoke_context, 0); + + // stricter_abi_and_runtime_constraints does not copy data in update_callee_account() + caller_account.serialized_data[0] = b'b'; + update_callee_account( + true, // check_aligned + &caller_account, + callee_account, + false, // stricter_abi_and_runtime_constraints + false, // account_data_direct_mapping + ) + .unwrap(); + borrow_instruction_account!(callee_account, invoke_context, 0); + assert_eq!(callee_account.get_data(), b"boobar"); + + // growing resize + let mut 
data = b"foobarbaz".to_vec(); + *caller_account.ref_to_len_in_vm = data.len() as u64; + caller_account.serialized_data = &mut data; + assert_eq!( + update_callee_account( + true, // check_aligned + &caller_account, + callee_account, + stricter_abi_and_runtime_constraints, + true, // account_data_direct_mapping + ) + .unwrap(), + stricter_abi_and_runtime_constraints, + ); + + // truncating resize + let mut data = b"baz".to_vec(); + *caller_account.ref_to_len_in_vm = data.len() as u64; + caller_account.serialized_data = &mut data; + borrow_instruction_account!(callee_account, invoke_context, 0); + assert_eq!( + update_callee_account( + true, // check_aligned + &caller_account, + callee_account, + stricter_abi_and_runtime_constraints, + true, // account_data_direct_mapping + ) + .unwrap(), + stricter_abi_and_runtime_constraints, + ); + + // close the account + let mut data = Vec::new(); + caller_account.serialized_data = &mut data; + *caller_account.ref_to_len_in_vm = 0; + let mut owner = system_program::id(); + caller_account.owner = &mut owner; + borrow_instruction_account!(callee_account, invoke_context, 0); + update_callee_account( + true, // check_aligned + &caller_account, + callee_account, + stricter_abi_and_runtime_constraints, + true, // account_data_direct_mapping + ) + .unwrap(); + borrow_instruction_account!(callee_account, invoke_context, 0); + assert_eq!(callee_account.get_data(), b""); + + // growing beyond address_space_reserved_for_account + *caller_account.ref_to_len_in_vm = (7 + MAX_PERMITTED_DATA_INCREASE) as u64; + let result = update_callee_account( + true, // check_aligned + &caller_account, + callee_account, + stricter_abi_and_runtime_constraints, + true, // account_data_direct_mapping + ); + if stricter_abi_and_runtime_constraints { + assert_matches!( + result, + Err(error) if error.downcast_ref::().unwrap() == &InstructionError::InvalidRealloc + ); + } else { + result.unwrap(); + } + } + + #[test_matrix([false, true])] + fn 
test_update_callee_account_data_readonly(stricter_abi_and_runtime_constraints: bool) { + let transaction_accounts = + transaction_with_one_readonly_instruction_account(b"foobar".to_vec()); + let account = transaction_accounts[1].1.clone(); + + mock_invoke_context!( + invoke_context, + transaction_context, + b"instruction data", + transaction_accounts, + 0, + &[1] + ); + + let mut mock_caller_account = + MockCallerAccount::new(1234, *account.owner(), account.data(), false); + let mut caller_account = mock_caller_account.caller_account(); + borrow_instruction_account!(callee_account, invoke_context, 0); + + // stricter_abi_and_runtime_constraints does not copy data in update_callee_account() + caller_account.serialized_data[0] = b'b'; + assert_matches!( + update_callee_account( + true, // check_aligned + &caller_account, + callee_account, + false, // stricter_abi_and_runtime_constraints + false, // account_data_direct_mapping + ), + Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ExternalAccountDataModified + ); + + // growing resize + let mut data = b"foobarbaz".to_vec(); + *caller_account.ref_to_len_in_vm = data.len() as u64; + caller_account.serialized_data = &mut data; + borrow_instruction_account!(callee_account, invoke_context, 0); + assert_matches!( + update_callee_account( + true, // check_aligned + &caller_account, + callee_account, + stricter_abi_and_runtime_constraints, + true, // account_data_direct_mapping + ), + Err(error) if error.downcast_ref::().unwrap() == &InstructionError::AccountDataSizeChanged + ); + + // truncating resize + let mut data = b"baz".to_vec(); + *caller_account.ref_to_len_in_vm = data.len() as u64; + caller_account.serialized_data = &mut data; + borrow_instruction_account!(callee_account, invoke_context, 0); + assert_matches!( + update_callee_account( + true, // check_aligned + &caller_account, + callee_account, + stricter_abi_and_runtime_constraints, + true, // account_data_direct_mapping + ), + Err(error) if 
error.downcast_ref::().unwrap() == &InstructionError::AccountDataSizeChanged + ); + } +} diff --git a/program-runtime/src/execution_budget.rs b/program-runtime/src/execution_budget.rs index 9884127c19e678..fff0a2b65b27af 100644 --- a/program-runtime/src/execution_budget.rs +++ b/program-runtime/src/execution_budget.rs @@ -5,12 +5,12 @@ use { /// Max instruction stack depth. This is the maximum nesting of instructions that can happen during /// a transaction. pub const MAX_INSTRUCTION_STACK_DEPTH: usize = 5; -/// Max instruction stack depth with SIMD-0296 enabled. Allows 8 nested CPIs. -pub const MAX_INSTRUCTION_STACK_DEPTH_SIMD_0296: usize = 9; +/// Max instruction stack depth with SIMD-0268 enabled. Allows 8 nested CPIs. +pub const MAX_INSTRUCTION_STACK_DEPTH_SIMD_0268: usize = 9; -fn get_max_instruction_stack_depth(simd_0296_active: bool) -> usize { - if simd_0296_active { - MAX_INSTRUCTION_STACK_DEPTH_SIMD_0296 +fn get_max_instruction_stack_depth(simd_0268_active: bool) -> usize { + if simd_0268_active { + MAX_INSTRUCTION_STACK_DEPTH_SIMD_0268 } else { MAX_INSTRUCTION_STACK_DEPTH } @@ -58,8 +58,6 @@ pub struct SVMTransactionExecutionBudget { pub max_call_depth: usize, /// Size of a stack frame in bytes, must match the size specified in the LLVM SBF backend pub stack_frame_size: usize, - /// Maximum cross-program invocation instruction size - pub max_cpi_instruction_size: usize, /// program heap region size, default: solana_program_entrypoint::HEAP_LENGTH pub heap_size: u32, } @@ -67,20 +65,19 @@ pub struct SVMTransactionExecutionBudget { #[cfg(feature = "dev-context-only-utils")] impl Default for SVMTransactionExecutionBudget { fn default() -> Self { - Self::new_with_defaults(/* simd_0296_active */ false) + Self::new_with_defaults(/* simd_0268_active */ false) } } impl SVMTransactionExecutionBudget { - pub fn new_with_defaults(simd_0296_active: bool) -> Self { + pub fn new_with_defaults(simd_0268_active: bool) -> Self { SVMTransactionExecutionBudget { 
compute_unit_limit: u64::from(MAX_COMPUTE_UNIT_LIMIT), - max_instruction_stack_depth: get_max_instruction_stack_depth(simd_0296_active), + max_instruction_stack_depth: get_max_instruction_stack_depth(simd_0268_active), max_instruction_trace_length: 64, sha256_max_slices: 20_000, max_call_depth: MAX_CALL_DEPTH, stack_frame_size: STACK_FRAME_SIZE, - max_cpi_instruction_size: 1280, // IPv6 Min MTU size heap_size: u32::try_from(solana_program_entrypoint::HEAP_LENGTH).unwrap(), } } diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 22a02d58d48a32..13d935887ed84a 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -13,8 +13,6 @@ use { solana_epoch_schedule::EpochSchedule, solana_hash::Hash, solana_instruction::{error::InstructionError, AccountMeta, Instruction}, - solana_log_collector::{ic_msg, LogCollector}, - solana_measure::measure::Measure, solana_pubkey::Pubkey, solana_sbpf::{ ebpf::MM_HEAP_START, @@ -28,12 +26,15 @@ use { }, solana_svm_callback::InvokeContextCallback, solana_svm_feature_set::SVMFeatureSet, + solana_svm_log_collector::{ic_msg, LogCollector}, + solana_svm_measure::measure::Measure, + solana_svm_timings::{ExecuteDetailsTimings, ExecuteTimings}, solana_svm_transaction::{instruction::SVMInstruction, svm_message::SVMMessage}, - solana_timings::{ExecuteDetailsTimings, ExecuteTimings}, + solana_svm_type_overrides::sync::Arc, solana_transaction_context::{ - IndexOfAccount, InstructionAccount, TransactionAccount, TransactionContext, + transaction_accounts::TransactionAccount, IndexOfAccount, InstructionAccount, + TransactionContext, MAX_ACCOUNTS_PER_TRANSACTION, }, - solana_type_overrides::sync::{atomic::Ordering, Arc}, std::{ alloc::Layout, cell::RefCell, @@ -202,8 +203,6 @@ pub struct InvokeContext<'a> { pub timings: ExecuteDetailsTimings, pub syscall_context: Vec>, traces: Vec>, - /// Stops copying account data if stricter_abi_and_runtime_constraints is enabled - 
pub account_data_direct_mapping: bool, } impl<'a> InvokeContext<'a> { @@ -228,7 +227,6 @@ impl<'a> InvokeContext<'a> { timings: ExecuteDetailsTimings::default(), syscall_context: Vec::new(), traces: Vec::new(), - account_data_direct_mapping: false, } } @@ -251,33 +249,22 @@ impl<'a> InvokeContext<'a> { self.transaction_context.get_instruction_trace_length(), )?; let program_id = instruction_context - .get_last_program_key(self.transaction_context) + .get_program_key() .map_err(|_| InstructionError::UnsupportedProgramId)?; - if self - .transaction_context - .get_instruction_context_stack_height() - != 0 - { - let contains = (0..self - .transaction_context - .get_instruction_context_stack_height()) - .any(|level| { + if self.transaction_context.get_instruction_stack_height() != 0 { + let contains = + (0..self.transaction_context.get_instruction_stack_height()).any(|level| { self.transaction_context .get_instruction_context_at_nesting_level(level) - .and_then(|instruction_context| { - instruction_context - .try_borrow_last_program_account(self.transaction_context) - }) - .map(|program_account| program_account.get_key() == program_id) + .and_then(|instruction_context| instruction_context.get_program_key()) + .map(|program_key| program_key == program_id) .unwrap_or(false) }); let is_last = self .transaction_context .get_current_instruction_context() - .and_then(|instruction_context| { - instruction_context.try_borrow_last_program_account(self.transaction_context) - }) - .map(|program_account| program_account.get_key() == program_id) + .and_then(|instruction_context| instruction_context.get_program_key()) + .map(|program_key| program_key == program_id) .unwrap_or(false); if contains && !is_last { // Reentrancy not allowed unless caller is calling itself @@ -300,8 +287,7 @@ impl<'a> InvokeContext<'a> { /// Current height of the invocation stack, top level instructions are height /// `solana_instruction::TRANSACTION_LEVEL_STACK_HEIGHT` pub fn get_stack_height(&self) -> 
usize { - self.transaction_context - .get_instruction_context_stack_height() + self.transaction_context.get_instruction_stack_height() } /// Entrypoint for a cross-program invocation from a builtin program @@ -326,7 +312,7 @@ impl<'a> InvokeContext<'a> { // We reference accounts by an u8 index, so we have a total of 256 accounts. // This algorithm allocates the array on the stack for speed. // On AArch64 in release mode, this function only consumes 640 bytes of stack. - let mut transaction_callee_map: [u8; 256] = [u8::MAX; 256]; + let mut transaction_callee_map: Vec = vec![u8::MAX; MAX_ACCOUNTS_PER_TRANSACTION]; let mut instruction_accounts: Vec = Vec::with_capacity(instruction.accounts.len()); @@ -335,10 +321,9 @@ impl<'a> InvokeContext<'a> { // function, we must borrow it again as mutable. let program_account_index = { let instruction_context = self.transaction_context.get_current_instruction_context()?; - debug_assert!(instruction.accounts.len() <= u8::MAX as usize); + debug_assert!(instruction.accounts.len() <= transaction_callee_map.len()); - for (instruction_account_index, account_meta) in instruction.accounts.iter().enumerate() - { + for account_meta in instruction.accounts.iter() { let index_in_transaction = self .transaction_context .find_index_of_account(&account_meta.pubkey) @@ -360,21 +345,20 @@ impl<'a> InvokeContext<'a> { let cloned_account = { let instruction_account = instruction_accounts .get_mut(*index_in_callee as usize) - .ok_or(InstructionError::NotEnoughAccountKeys)?; + .ok_or(InstructionError::MissingAccount)?; instruction_account.set_is_signer( instruction_account.is_signer() || account_meta.is_signer, ); instruction_account.set_is_writable( instruction_account.is_writable() || account_meta.is_writable, ); - instruction_account.clone() + *instruction_account }; instruction_accounts.push(cloned_account); } else { *index_in_callee = instruction_accounts.len() as u8; instruction_accounts.push(InstructionAccount::new( index_in_transaction, - 
instruction_account_index as IndexOfAccount, account_meta.is_signer, account_meta.is_writable, )); @@ -383,12 +367,15 @@ impl<'a> InvokeContext<'a> { for current_index in 0..instruction_accounts.len() { let instruction_account = instruction_accounts.get(current_index).unwrap(); + let index_in_callee = *transaction_callee_map + .get(instruction_account.index_in_transaction as usize) + .unwrap() as usize; - if current_index != instruction_account.index_in_callee as usize { + if current_index != index_in_callee { let (is_signer, is_writable) = { let reference_account = instruction_accounts - .get(instruction_account.index_in_callee as usize) - .ok_or(InstructionError::NotEnoughAccountKeys)?; + .get(index_in_callee) + .ok_or(InstructionError::MissingAccount)?; ( reference_account.is_signer(), reference_account.is_writable(), @@ -405,64 +392,59 @@ impl<'a> InvokeContext<'a> { let index_in_caller = instruction_context.get_index_of_account_in_instruction( instruction_account.index_in_transaction, )?; - let borrowed_account = instruction_context - .try_borrow_instruction_account(self.transaction_context, index_in_caller)?; + + // This unwrap is safe because instruction.accounts.len() == instruction_accounts.len() + let account_key = &instruction.accounts.get(current_index).unwrap().pubkey; + // get_index_of_account_in_instruction has already checked if the index is valid. 
+ let caller_instruction_account = instruction_context + .instruction_accounts() + .get(index_in_caller as usize) + .unwrap(); // Readonly in caller cannot become writable in callee - if instruction_account.is_writable() && !borrowed_account.is_writable() { - ic_msg!( - self, - "{}'s writable privilege escalated", - borrowed_account.get_key(), - ); + if instruction_account.is_writable() && !caller_instruction_account.is_writable() { + ic_msg!(self, "{}'s writable privilege escalated", account_key,); return Err(InstructionError::PrivilegeEscalation); } // To be signed in the callee, // it must be either signed in the caller or by the program if instruction_account.is_signer() - && !(borrowed_account.is_signer() - || signers.contains(borrowed_account.get_key())) + && !(caller_instruction_account.is_signer() || signers.contains(account_key)) { - ic_msg!( - self, - "{}'s signer privilege escalated", - borrowed_account.get_key() - ); + ic_msg!(self, "{}'s signer privilege escalated", account_key,); return Err(InstructionError::PrivilegeEscalation); } } // Find and validate executables / program accounts - let callee_program_id = instruction.program_id; - let program_account_index = instruction_context - .find_index_of_instruction_account(self.transaction_context, &callee_program_id) - .ok_or_else(|| { - ic_msg!(self, "Unknown program {}", callee_program_id); - InstructionError::MissingAccount - })?; - let borrowed_program_account = instruction_context - .try_borrow_instruction_account(self.transaction_context, program_account_index)?; - #[allow(deprecated)] - if !self - .get_feature_set() - .remove_accounts_executable_flag_checks - && !borrowed_program_account.is_executable() + let callee_program_id = &instruction.program_id; + let program_account_index_in_transaction = self + .transaction_context + .find_index_of_account(callee_program_id); + let program_account_index_in_instruction = program_account_index_in_transaction + .map(|index| 
instruction_context.get_index_of_account_in_instruction(index)); + + // We first check if the account exists in the transaction, and then see if it is part + // of the instruction. + if program_account_index_in_instruction.is_none() + || program_account_index_in_instruction.unwrap().is_err() { - ic_msg!(self, "Account {} is not executable", callee_program_id); - return Err(InstructionError::AccountNotExecutable); + ic_msg!(self, "Unknown program {}", callee_program_id); + return Err(InstructionError::MissingAccount); } - borrowed_program_account.get_index_in_transaction() + // SAFETY: This unwrap is safe, because we checked the index in instruction in the + // previous if-condition. + program_account_index_in_transaction.unwrap() }; - self.transaction_context - .get_next_instruction_context_mut()? - .configure( - vec![program_account_index], - instruction_accounts, - &instruction.data, - ); + self.transaction_context.configure_next_instruction( + program_account_index, + instruction_accounts, + transaction_callee_map, + &instruction.data, + )?; Ok(()) } @@ -472,14 +454,14 @@ impl<'a> InvokeContext<'a> { &mut self, message: &impl SVMMessage, instruction: &SVMInstruction, - program_indices: Vec, + program_account_index: IndexOfAccount, ) -> Result<(), InstructionError> { // We reference accounts by an u8 index, so we have a total of 256 accounts. // This algorithm allocates the array on the stack for speed. // On AArch64 in release mode, this function only consumes 464 bytes of stack (when it is // not inlined). 
- let mut transaction_callee_map: [u8; 256] = [u8::MAX; 256]; - debug_assert!(instruction.accounts.len() <= u8::MAX as usize); + let mut transaction_callee_map: Vec = vec![u8::MAX; MAX_ACCOUNTS_PER_TRANSACTION]; + debug_assert!(instruction.accounts.len() <= transaction_callee_map.len()); let mut instruction_accounts: Vec = Vec::with_capacity(instruction.accounts.len()); @@ -497,15 +479,17 @@ impl<'a> InvokeContext<'a> { let index_in_transaction = *index_in_transaction as usize; instruction_accounts.push(InstructionAccount::new( index_in_transaction as IndexOfAccount, - *index_in_callee as IndexOfAccount, message.is_signer(index_in_transaction), message.is_writable(index_in_transaction), )); } - self.transaction_context - .get_next_instruction_context_mut()? - .configure(program_indices, instruction_accounts, instruction.data); + self.transaction_context.configure_next_instruction( + program_account_index, + instruction_accounts, + transaction_callee_map, + instruction.data, + )?; Ok(()) } @@ -549,28 +533,17 @@ impl<'a> InvokeContext<'a> { let process_executable_chain_time = Measure::start("process_executable_chain_time"); let builtin_id = { - debug_assert!(instruction_context.get_number_of_program_accounts() <= 1); - let borrowed_root_account = instruction_context - .try_borrow_program_account(self.transaction_context, 0) - .map_err(|_| InstructionError::UnsupportedProgramId)?; - let owner_id = borrowed_root_account.get_owner(); - if native_loader::check_id(owner_id) { - *borrowed_root_account.get_key() - } else if self - .get_feature_set() - .remove_accounts_executable_flag_checks + let owner_id = instruction_context.get_program_owner()?; + if native_loader::check_id(&owner_id) { + *instruction_context.get_program_key()? 
+ } else if bpf_loader_deprecated::check_id(&owner_id) + || bpf_loader::check_id(&owner_id) + || bpf_loader_upgradeable::check_id(&owner_id) + || loader_v4::check_id(&owner_id) { - if bpf_loader_deprecated::check_id(owner_id) - || bpf_loader::check_id(owner_id) - || bpf_loader_upgradeable::check_id(owner_id) - || loader_v4::check_id(owner_id) - { - *owner_id - } else { - return Err(InstructionError::UnsupportedProgramId); - } + owner_id } else { - *owner_id + return Err(InstructionError::UnsupportedProgramId); } }; @@ -588,9 +561,8 @@ impl<'a> InvokeContext<'a> { _ => None, } .ok_or(InstructionError::UnsupportedProgramId)?; - entry.ix_usage_counter.fetch_add(1, Ordering::Relaxed); - let program_id = *instruction_context.get_last_program_key(self.transaction_context)?; + let program_id = *instruction_context.get_program_key()?; self.transaction_context .set_return_data(program_id, Vec::new())?; let logger = self.get_log_collector(); @@ -729,12 +701,11 @@ impl<'a> InvokeContext<'a> { self.transaction_context .get_current_instruction_context() .and_then(|instruction_context| { - let program_account = - instruction_context.try_borrow_last_program_account(self.transaction_context); - debug_assert!(program_account.is_ok()); - program_account + let owner_id = instruction_context.get_program_owner(); + debug_assert!(owner_id.is_ok()); + owner_id }) - .map(|program_account| *program_account.get_owner() != bpf_loader_deprecated::id()) + .map(|owner_key| owner_key != bpf_loader_deprecated::id()) .unwrap_or(true) } @@ -781,8 +752,8 @@ macro_rules! with_mock_invoke_context_with_feature_set { $transaction_accounts:expr $(,)? 
) => { use { - solana_log_collector::LogCollector, solana_svm_callback::InvokeContextCallback, + solana_svm_log_collector::LogCollector, $crate::{ __private::{Hash, ReadableAccount, Rent, TransactionContext}, execution_budget::{SVMTransactionExecutionBudget, SVMTransactionExecutionCost}, @@ -865,7 +836,7 @@ pub fn mock_process_instruction_with_feature_set< G: FnMut(&mut InvokeContext), >( loader_id: &Pubkey, - mut program_indices: Vec, + program_index: Option, instruction_data: &[u8], mut transaction_accounts: Vec, instruction_account_metas: Vec, @@ -877,32 +848,26 @@ pub fn mock_process_instruction_with_feature_set< ) -> Vec { let mut instruction_accounts: Vec = Vec::with_capacity(instruction_account_metas.len()); - for (instruction_account_index, account_meta) in instruction_account_metas.iter().enumerate() { + for account_meta in instruction_account_metas.iter() { let index_in_transaction = transaction_accounts .iter() .position(|(key, _account)| *key == account_meta.pubkey) .unwrap_or(transaction_accounts.len()) as IndexOfAccount; - let index_in_callee = instruction_accounts - .get(0..instruction_account_index) - .unwrap() - .iter() - .position(|instruction_account| { - instruction_account.index_in_transaction == index_in_transaction - }) - .unwrap_or(instruction_account_index) as IndexOfAccount; instruction_accounts.push(InstructionAccount::new( index_in_transaction, - index_in_callee, account_meta.is_signer, account_meta.is_writable, )); } - if program_indices.is_empty() { - program_indices.insert(0, transaction_accounts.len() as IndexOfAccount); + + let program_index = if let Some(index) = program_index { + index + } else { let processor_account = AccountSharedData::new(0, 0, &native_loader::id()); transaction_accounts.push((*loader_id, processor_account)); - } + transaction_accounts.len().saturating_sub(1) as IndexOfAccount + }; let pop_epoch_schedule_account = if !transaction_accounts .iter() .any(|(key, _)| *key == sysvar::epoch_schedule::id()) @@ -930,9 
+895,8 @@ pub fn mock_process_instruction_with_feature_set< pre_adjustments(&mut invoke_context); invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(program_indices, instruction_accounts, instruction_data); + .configure_next_instruction_for_tests(program_index, instruction_accounts, instruction_data) + .unwrap(); let result = invoke_context.process_instruction(&mut 0, &mut ExecuteTimings::default()); assert_eq!(result, expected_result); post_adjustments(&mut invoke_context); @@ -946,7 +910,7 @@ pub fn mock_process_instruction_with_feature_set< pub fn mock_process_instruction( loader_id: &Pubkey, - program_indices: Vec, + program_index: Option, instruction_data: &[u8], transaction_accounts: Vec, instruction_account_metas: Vec, @@ -957,7 +921,7 @@ pub fn mock_process_instruction Vec { mock_process_instruction_with_feature_set( loader_id, - program_indices, + program_index, instruction_data, transaction_accounts, instruction_account_metas, @@ -977,7 +941,12 @@ mod tests { serde::{Deserialize, Serialize}, solana_account::WritableAccount, solana_instruction::Instruction, + solana_keypair::Keypair, solana_rent::Rent, + solana_signer::Signer, + solana_transaction::{sanitized::SanitizedTransaction, Transaction}, + solana_transaction_context::MAX_ACCOUNTS_PER_INSTRUCTION, + std::collections::HashSet, test_case::test_case, }; @@ -1008,30 +977,23 @@ mod tests { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); - let program_id = instruction_context.get_last_program_key(transaction_context)?; + let program_id = instruction_context.get_program_key()?; let instruction_accounts = (0..4) .map(|instruction_account_index| { - InstructionAccount::new( - instruction_account_index, - instruction_account_index, - false, - false, - ) + 
InstructionAccount::new(instruction_account_index, false, false) }) .collect::>(); assert_eq!( program_id, instruction_context - .try_borrow_instruction_account(transaction_context, 0)? + .try_borrow_instruction_account(0)? .get_owner() ); assert_ne!( instruction_context - .try_borrow_instruction_account(transaction_context, 1)? + .try_borrow_instruction_account(1)? .get_owner(), - instruction_context - .try_borrow_instruction_account(transaction_context, 0)? - .get_key() + instruction_context.get_key_of_instruction_account(0)? ); if let Ok(instruction) = bincode::deserialize(instruction_data) { @@ -1039,17 +1001,17 @@ mod tests { MockInstruction::NoopSuccess => (), MockInstruction::NoopFail => return Err(InstructionError::GenericError), MockInstruction::ModifyOwned => instruction_context - .try_borrow_instruction_account(transaction_context, 0)? + .try_borrow_instruction_account(0)? .set_data_from_slice(&[1])?, MockInstruction::ModifyNotOwned => instruction_context - .try_borrow_instruction_account(transaction_context, 1)? + .try_borrow_instruction_account(1)? .set_data_from_slice(&[1])?, MockInstruction::ModifyReadonly => instruction_context - .try_borrow_instruction_account(transaction_context, 2)? + .try_borrow_instruction_account(2)? .set_data_from_slice(&[1])?, MockInstruction::UnbalancedPush => { instruction_context - .try_borrow_instruction_account(transaction_context, 0)? + .try_borrow_instruction_account(0)? 
.checked_add_lamports(1)?; let program_id = *transaction_context.get_key_of_account_at_index(3)?; let metas = vec![ @@ -1069,9 +1031,8 @@ mod tests { ); invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![3], instruction_accounts, &[]); + .configure_next_instruction_for_tests(3, instruction_accounts, &[]) + .unwrap(); let result = invoke_context.push(); assert_eq!(result, Err(InstructionError::UnbalancedInstruction)); result?; @@ -1080,7 +1041,7 @@ mod tests { .and(invoke_context.pop())?; } MockInstruction::UnbalancedPop => instruction_context - .try_borrow_instruction_account(transaction_context, 0)? + .try_borrow_instruction_account(0)? .checked_add_lamports(1)?, MockInstruction::ConsumeComputeUnits { compute_units_to_consume, @@ -1092,8 +1053,8 @@ mod tests { return desired_result; } MockInstruction::Resize { new_len } => instruction_context - .try_borrow_instruction_account(transaction_context, 0)? - .set_data(vec![0; new_len as usize])?, + .try_borrow_instruction_account(0)? 
+ .set_data_from_slice(&vec![0; new_len as usize])?, } } else { return Err(InstructionError::InvalidInstructionData); @@ -1102,11 +1063,11 @@ mod tests { } ); - #[test_case(false; "SIMD-0296 disabled")] - #[test_case(true; "SIMD-0296 enabled")] - fn test_instruction_stack_height(simd_0296_active: bool) { + #[test_case(false; "SIMD-0268 disabled")] + #[test_case(true; "SIMD-0268 enabled")] + fn test_instruction_stack_height(simd_0268_active: bool) { let one_more_than_max_depth = - SVMTransactionExecutionBudget::new_with_defaults(simd_0296_active) + SVMTransactionExecutionBudget::new_with_defaults(simd_0268_active) .max_instruction_stack_depth .saturating_add(1); let mut invoke_stack = vec![]; @@ -1120,7 +1081,6 @@ mod tests { )); instruction_accounts.push(InstructionAccount::new( index as IndexOfAccount, - instruction_accounts.len() as IndexOfAccount, false, true, )); @@ -1131,7 +1091,6 @@ mod tests { AccountSharedData::new(1, 1, &solana_pubkey::Pubkey::default()), )); instruction_accounts.push(InstructionAccount::new( - index as IndexOfAccount, index as IndexOfAccount, false, false, @@ -1144,13 +1103,12 @@ mod tests { for _ in 0..invoke_stack.len() { invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure( - vec![one_more_than_max_depth.saturating_add(depth_reached) as IndexOfAccount], + .configure_next_instruction_for_tests( + one_more_than_max_depth.saturating_add(depth_reached) as IndexOfAccount, instruction_accounts.clone(), &[], - ); + ) + .unwrap(); if Err(InstructionError::CallDepth) == invoke_context.push() { break; } @@ -1163,10 +1121,24 @@ mod tests { #[test] fn test_max_instruction_trace_length() { const MAX_INSTRUCTIONS: usize = 8; - let mut transaction_context = - TransactionContext::new(Vec::new(), Rent::default(), 1, MAX_INSTRUCTIONS); + let mut transaction_context = TransactionContext::new( + vec![( + Pubkey::new_unique(), + AccountSharedData::new(1, 1, &Pubkey::new_unique()), + )], + Rent::default(), + 1, 
+ MAX_INSTRUCTIONS, + ); for _ in 0..MAX_INSTRUCTIONS { transaction_context.push().unwrap(); + transaction_context + .configure_next_instruction_for_tests( + 0, + vec![InstructionAccount::new(0, false, false)], + &[], + ) + .unwrap(); transaction_context.pop().unwrap(); } assert_eq!( @@ -1208,7 +1180,6 @@ mod tests { let instruction_accounts = (0..4) .map(|instruction_account_index| { InstructionAccount::new( - instruction_account_index, instruction_account_index, false, instruction_account_index < 2, @@ -1226,9 +1197,8 @@ mod tests { // Account modification tests invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![4], instruction_accounts, &[]); + .configure_next_instruction_for_tests(4, instruction_accounts, &[]) + .unwrap(); invoke_context.push().unwrap(); let inner_instruction = Instruction::new_with_bincode(callee_program_id, &instruction, metas.clone()); @@ -1265,7 +1235,6 @@ mod tests { let instruction_accounts = (0..4) .map(|instruction_account_index| { InstructionAccount::new( - instruction_account_index, instruction_account_index, false, instruction_account_index < 2, @@ -1284,9 +1253,8 @@ mod tests { let compute_units_to_consume = 10; invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![4], instruction_accounts, &[]); + .configure_next_instruction_for_tests(4, instruction_accounts, &[]) + .unwrap(); invoke_context.push().unwrap(); let inner_instruction = Instruction::new_with_bincode( callee_program_id, @@ -1330,9 +1298,8 @@ mod tests { invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![0], vec![], &[]); + .configure_next_instruction_for_tests(0, vec![], &[]) + .unwrap(); invoke_context.push().unwrap(); assert_eq!(*invoke_context.get_compute_budget(), execution_budget); invoke_context.pop().unwrap(); @@ -1355,8 +1322,8 @@ mod tests { (program_key, program_account), ]; let instruction_accounts = vec![ 
- InstructionAccount::new(0, 0, false, true), - InstructionAccount::new(1, 1, false, false), + InstructionAccount::new(0, false, true), + InstructionAccount::new(1, false, false), ]; with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); let mut program_cache_for_tx_batch = ProgramCacheForTxBatch::default(); @@ -1371,18 +1338,221 @@ mod tests { invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![2], instruction_accounts, &instruction_data); + .configure_next_instruction_for_tests(2, instruction_accounts, &instruction_data) + .unwrap(); let result = invoke_context.process_instruction(&mut 0, &mut ExecuteTimings::default()); assert!(result.is_ok()); assert_eq!( - invoke_context - .transaction_context - .accounts_resize_delta() - .unwrap(), + invoke_context.transaction_context.accounts().resize_delta(), resize_delta ); } + + #[test] + fn test_prepare_instruction_maximum_accounts() { + let mut transaction_accounts: Vec = + Vec::with_capacity(MAX_ACCOUNTS_PER_TRANSACTION); + let mut account_metas: Vec = Vec::with_capacity(MAX_ACCOUNTS_PER_INSTRUCTION); + + // Fee-payer + let fee_payer = Keypair::new(); + transaction_accounts.push(( + fee_payer.pubkey(), + AccountSharedData::new(1, 1, &Pubkey::new_unique()), + )); + account_metas.push(AccountMeta::new(fee_payer.pubkey(), true)); + + let program_id = Pubkey::new_unique(); + let mut program_account = AccountSharedData::new(1, 1, &Pubkey::new_unique()); + program_account.set_executable(true); + transaction_accounts.push((program_id, program_account)); + account_metas.push(AccountMeta::new_readonly(program_id, false)); + + for _ in 2..MAX_ACCOUNTS_PER_INSTRUCTION { + let key = Pubkey::new_unique(); + transaction_accounts.push((key, AccountSharedData::new(1, 1, &Pubkey::new_unique()))); + account_metas.push(AccountMeta::new_readonly(key, false)); + } + + with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); + + 
let instruction_1 = Instruction::new_with_bytes(program_id, &[20], account_metas.clone()); + + let instruction_2 = Instruction::new_with_bytes( + program_id, + &[20], + account_metas.iter().rev().cloned().collect(), + ); + + let transaction = Transaction::new_with_payer( + &[instruction_1.clone(), instruction_2.clone()], + Some(&fee_payer.pubkey()), + ); + + let sanitized = + SanitizedTransaction::try_from_legacy_transaction(transaction, &HashSet::new()) + .unwrap(); + + fn test_case_1(invoke_context: &InvokeContext) { + let instruction_context = invoke_context + .transaction_context + .get_next_instruction_context() + .unwrap(); + for index_in_transaction in 0..MAX_ACCOUNTS_PER_INSTRUCTION as IndexOfAccount { + let index_in_instruction = instruction_context + .get_index_of_account_in_instruction(index_in_transaction as IndexOfAccount) + .unwrap(); + let other_transaction = instruction_context + .get_index_of_instruction_account_in_transaction(index_in_instruction) + .unwrap(); + assert_eq!(index_in_transaction, other_transaction); + assert_eq!(index_in_transaction, index_in_instruction); + } + } + + fn test_case_2(invoke_context: &InvokeContext) { + let instruction_context = invoke_context + .transaction_context + .get_next_instruction_context() + .unwrap(); + for index_in_transaction in 0..MAX_ACCOUNTS_PER_INSTRUCTION as IndexOfAccount { + let index_in_instruction = instruction_context + .get_index_of_account_in_instruction(index_in_transaction as IndexOfAccount) + .unwrap(); + let other_transaction = instruction_context + .get_index_of_instruction_account_in_transaction(index_in_instruction) + .unwrap(); + assert_eq!( + index_in_instruction, + (MAX_ACCOUNTS_PER_INSTRUCTION as IndexOfAccount) + .saturating_sub(index_in_transaction) + .saturating_sub(1) + ); + assert_eq!(index_in_transaction, other_transaction); + } + } + + let svm_instruction = + SVMInstruction::from(sanitized.message().instructions().first().unwrap()); + invoke_context + 
.prepare_next_top_level_instruction(&sanitized, &svm_instruction, 90) + .unwrap(); + + test_case_1(&invoke_context); + + invoke_context.transaction_context.push().unwrap(); + let svm_instruction = + SVMInstruction::from(sanitized.message().instructions().get(1).unwrap()); + invoke_context + .prepare_next_top_level_instruction(&sanitized, &svm_instruction, 90) + .unwrap(); + + test_case_2(&invoke_context); + + invoke_context.transaction_context.push().unwrap(); + invoke_context + .prepare_next_instruction(&instruction_1, &[fee_payer.pubkey()]) + .unwrap(); + test_case_1(&invoke_context); + + invoke_context.transaction_context.push().unwrap(); + invoke_context + .prepare_next_instruction(&instruction_2, &[fee_payer.pubkey()]) + .unwrap(); + test_case_2(&invoke_context); + } + + #[test] + fn test_duplicated_accounts() { + let mut transaction_accounts: Vec = + Vec::with_capacity(MAX_ACCOUNTS_PER_TRANSACTION); + let mut account_metas: Vec = + Vec::with_capacity(MAX_ACCOUNTS_PER_INSTRUCTION.saturating_sub(1)); + + // Fee-payer + let fee_payer = Keypair::new(); + transaction_accounts.push(( + fee_payer.pubkey(), + AccountSharedData::new(1, 1, &Pubkey::new_unique()), + )); + account_metas.push(AccountMeta::new(fee_payer.pubkey(), true)); + + let program_id = Pubkey::new_unique(); + let mut program_account = AccountSharedData::new(1, 1, &Pubkey::new_unique()); + program_account.set_executable(true); + transaction_accounts.push((program_id, program_account)); + account_metas.push(AccountMeta::new_readonly(program_id, false)); + + for i in 2..account_metas.capacity() { + if i % 2 == 0 { + let key = Pubkey::new_unique(); + transaction_accounts + .push((key, AccountSharedData::new(1, 1, &Pubkey::new_unique()))); + account_metas.push(AccountMeta::new_readonly(key, false)); + } else { + let last_key = transaction_accounts.last().unwrap().0; + account_metas.push(AccountMeta::new_readonly(last_key, false)); + } + } + + with_mock_invoke_context!(invoke_context, transaction_context, 
transaction_accounts); + + let instruction = Instruction::new_with_bytes(program_id, &[20], account_metas.clone()); + + let transaction = Transaction::new_with_payer(&[instruction], Some(&fee_payer.pubkey())); + + let sanitized = + SanitizedTransaction::try_from_legacy_transaction(transaction, &HashSet::new()) + .unwrap(); + let svm_instruction = + SVMInstruction::from(sanitized.message().instructions().first().unwrap()); + + invoke_context + .prepare_next_top_level_instruction(&sanitized, &svm_instruction, 90) + .unwrap(); + + { + let instruction_context = invoke_context + .transaction_context + .get_next_instruction_context() + .unwrap(); + for index_in_instruction in 2..account_metas.len() as IndexOfAccount { + let is_duplicate = instruction_context + .is_instruction_account_duplicate(index_in_instruction) + .unwrap(); + if index_in_instruction % 2 == 0 { + assert!(is_duplicate.is_none()); + } else { + assert_eq!(is_duplicate, Some(index_in_instruction.saturating_sub(1))); + } + } + } + + invoke_context.transaction_context.push().unwrap(); + + let instruction = Instruction::new_with_bytes( + program_id, + &[20], + account_metas.iter().cloned().rev().collect(), + ); + + invoke_context + .prepare_next_instruction(&instruction, &[fee_payer.pubkey()]) + .unwrap(); + let instruction_context = invoke_context + .transaction_context + .get_next_instruction_context() + .unwrap(); + for index_in_instruction in 2..account_metas.len().saturating_sub(1) as u16 { + let is_duplicate = instruction_context + .is_instruction_account_duplicate(index_in_instruction) + .unwrap(); + if index_in_instruction % 2 == 0 { + assert!(is_duplicate.is_none()); + } else { + assert_eq!(is_duplicate, Some(index_in_instruction.saturating_sub(1))); + } + } + } } diff --git a/program-runtime/src/lib.rs b/program-runtime/src/lib.rs index 4942513590016a..e302e59bada7d1 100644 --- a/program-runtime/src/lib.rs +++ b/program-runtime/src/lib.rs @@ -2,15 +2,13 @@ #![deny(clippy::arithmetic_side_effects)] 
#![deny(clippy::indexing_slicing)] -#[cfg(feature = "metrics")] -#[macro_use] -extern crate solana_metrics; - pub use solana_sbpf; +pub mod cpi; pub mod execution_budget; pub mod invoke_context; pub mod loaded_programs; pub mod mem_pool; +pub mod memory; pub mod serialization; pub mod stable_log; pub mod sysvar_cache; diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index d524ae6247bd24..ff00bbb884ae07 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -10,7 +10,7 @@ use { solana_sdk_ids::{ bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, loader_v4, native_loader, }, - solana_type_overrides::{ + solana_svm_type_overrides::{ rand::{thread_rng, Rng}, sync::{ atomic::{AtomicU64, Ordering}, @@ -25,7 +25,7 @@ use { }, }; #[cfg(feature = "metrics")] -use {solana_measure::measure::Measure, solana_timings::ExecuteDetailsTimings}; +use {solana_svm_measure::measure::Measure, solana_svm_timings::ExecuteDetailsTimings}; pub type ProgramRuntimeEnvironment = Arc>>; pub const MAX_LOADED_ENTRY_COUNT: usize = 512; @@ -189,9 +189,7 @@ pub struct ProgramCacheEntry { /// Slot in which this entry will become active (can be in the future) pub effective_slot: Slot, /// How often this entry was used by a transaction - pub tx_usage_counter: AtomicU64, - /// How often this entry was used by an instruction - pub ix_usage_counter: AtomicU64, + pub tx_usage_counter: Arc, /// Latest slot in which the entry was used pub latest_access_slot: AtomicU64, } @@ -293,14 +291,6 @@ impl LoadProgramMetrics { timings.create_executor_load_elf_us += self.load_elf_us; timings.create_executor_verify_code_us += self.verify_code_us; timings.create_executor_jit_compile_us += self.jit_compile_us; - datapoint_trace!( - "create_executor_trace", - ("program_id", self.program_id, String), - ("register_syscalls_us", self.register_syscalls_us, i64), - ("load_elf_us", self.load_elf_us, i64), - ("verify_code_us", 
self.verify_code_us, i64), - ("jit_compile_us", self.jit_compile_us, i64), - ); } } @@ -413,9 +403,8 @@ impl ProgramCacheEntry { account_owner: ProgramCacheEntryOwner::try_from(loader_key).unwrap(), account_size, effective_slot, - tx_usage_counter: AtomicU64::new(0), + tx_usage_counter: Arc::::default(), program: ProgramCacheEntryType::Loaded(executable), - ix_usage_counter: AtomicU64::new(0), latest_access_slot: AtomicU64::new(0), }) } @@ -437,8 +426,7 @@ impl ProgramCacheEntry { account_size: self.account_size, deployment_slot: self.deployment_slot, effective_slot: self.effective_slot, - tx_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)), - ix_usage_counter: AtomicU64::new(self.ix_usage_counter.load(Ordering::Relaxed)), + tx_usage_counter: self.tx_usage_counter.clone(), latest_access_slot: AtomicU64::new(self.latest_access_slot.load(Ordering::Relaxed)), }) } @@ -458,9 +446,8 @@ impl ProgramCacheEntry { account_owner: ProgramCacheEntryOwner::NativeLoader, account_size, effective_slot: deployment_slot, - tx_usage_counter: AtomicU64::new(0), + tx_usage_counter: Arc::::default(), program: ProgramCacheEntryType::Builtin(program), - ix_usage_counter: AtomicU64::new(0), latest_access_slot: AtomicU64::new(0), } } @@ -469,6 +456,20 @@ impl ProgramCacheEntry { slot: Slot, account_owner: ProgramCacheEntryOwner, reason: ProgramCacheEntryType, + ) -> Self { + Self::new_tombstone_with_usage_counter( + slot, + account_owner, + reason, + Arc::::default(), + ) + } + + pub fn new_tombstone_with_usage_counter( + slot: Slot, + account_owner: ProgramCacheEntryOwner, + reason: ProgramCacheEntryType, + tx_usage_counter: Arc, ) -> Self { let tombstone = Self { program: reason, @@ -476,8 +477,7 @@ impl ProgramCacheEntry { account_size: 0, deployment_slot: slot, effective_slot: slot, - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), + tx_usage_counter, latest_access_slot: AtomicU64::new(0), }; 
debug_assert!(tombstone.is_tombstone()); @@ -772,10 +772,11 @@ impl ProgramCacheForTxBatch { // Found a program entry on the current fork, but it's not effective // yet. It indicates that the program has delayed visibility. Return // the tombstone to reflect that. - Arc::new(ProgramCacheEntry::new_tombstone( + Arc::new(ProgramCacheEntry::new_tombstone_with_usage_counter( entry.deployment_slot, entry.account_owner, ProgramCacheEntryType::DelayVisibility, + entry.tx_usage_counter.clone(), )) } else { entry.clone() @@ -919,10 +920,6 @@ impl ProgramCache { existing.tx_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed, ); - entry.ix_usage_counter.fetch_add( - existing.ix_usage_counter.load(Ordering::Relaxed), - Ordering::Relaxed, - ); *existing = Arc::clone(&entry); self.stats.reloads.fetch_add(1, Ordering::Relaxed); } @@ -1060,10 +1057,11 @@ impl ProgramCache { /// and returns which program accounts the accounts DB needs to load. pub fn extract( &self, - search_for: &mut Vec<(Pubkey, (ProgramCacheMatchCriteria, u64))>, + search_for: &mut Vec<(Pubkey, ProgramCacheMatchCriteria)>, loaded_programs_for_tx_batch: &mut ProgramCacheForTxBatch, - is_first_round: bool, - ) -> Option<(Pubkey, u64)> { + increment_usage_counter: bool, + count_hits_and_misses: bool, + ) -> Option { debug_assert!(self.fork_graph.is_some()); let fork_graph = self.fork_graph.as_ref().unwrap().upgrade().unwrap(); let locked_fork_graph = fork_graph.read().unwrap(); @@ -1073,7 +1071,7 @@ impl ProgramCache { entries, loading_entries, } => { - search_for.retain(|(key, (match_criteria, usage_count))| { + search_for.retain(|(key, match_criteria)| { if let Some(second_level) = entries.get(key) { for entry in second_level.iter().rev() { if entry.deployment_slot <= self.latest_root_slot @@ -1106,19 +1104,22 @@ impl ProgramCache { // Found a program entry on the current fork, but it's not effective // yet. It indicates that the program has delayed visibility. Return // the tombstone to reflect that. 
- Arc::new(ProgramCacheEntry::new_tombstone( + Arc::new(ProgramCacheEntry::new_tombstone_with_usage_counter( entry.deployment_slot, entry.account_owner, ProgramCacheEntryType::DelayVisibility, + entry.tx_usage_counter.clone(), )) } else { continue; }; entry_to_return .update_access_slot(loaded_programs_for_tx_batch.slot); - entry_to_return - .tx_usage_counter - .fetch_add(*usage_count, Ordering::Relaxed); + if increment_usage_counter { + entry_to_return + .tx_usage_counter + .fetch_add(1, Ordering::Relaxed); + } loaded_programs_for_tx_batch .entries .insert(*key, entry_to_return); @@ -1134,7 +1135,7 @@ impl ProgramCache { loaded_programs_for_tx_batch.slot, thread::current().id(), )); - cooperative_loading_task = Some((*key, *usage_count)); + cooperative_loading_task = Some(*key); } } true @@ -1142,7 +1143,7 @@ impl ProgramCache { } } drop(locked_fork_graph); - if is_first_round { + if count_hits_and_misses { self.stats .misses .fetch_add(search_for.len() as u64, Ordering::Relaxed); @@ -1439,8 +1440,7 @@ mod tests { account_size: 0, deployment_slot, effective_slot, - tx_usage_counter: usage_counter, - ix_usage_counter: AtomicU64::default(), + tx_usage_counter: Arc::new(usage_counter), latest_access_slot: AtomicU64::new(deployment_slot), }) } @@ -1455,8 +1455,7 @@ mod tests { account_size: 0, deployment_slot, effective_slot, - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), + tx_usage_counter: Arc::default(), latest_access_slot: AtomicU64::default(), }) } @@ -1895,8 +1894,7 @@ mod tests { account_size: 0, deployment_slot: 10, effective_slot: 11, - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), + tx_usage_counter: Arc::default(), latest_access_slot: AtomicU64::default(), }), )); @@ -1908,8 +1906,7 @@ mod tests { account_size: 0, deployment_slot: 10, effective_slot: 11, - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), + tx_usage_counter: Arc::default(), 
latest_access_slot: AtomicU64::default(), }), ); @@ -1934,8 +1931,7 @@ mod tests { account_size: 0, deployment_slot: 10, effective_slot: 11, - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), + tx_usage_counter: Arc::default(), latest_access_slot: AtomicU64::default(), }), )); @@ -1947,8 +1943,7 @@ mod tests { account_size: 0, deployment_slot: 10, effective_slot: 11, - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), + tx_usage_counter: Arc::default(), latest_access_slot: AtomicU64::default(), }), )); @@ -2103,8 +2098,7 @@ mod tests { account_size: 0, deployment_slot: 20, effective_slot: 20, - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), + tx_usage_counter: Arc::default(), latest_access_slot: AtomicU64::default(), }); cache.assign_program(program1, updated_program.clone()); @@ -2176,7 +2170,7 @@ mod tests { cache: &ProgramCache, loading_slot: Slot, keys: &[Pubkey], - ) -> Vec<(Pubkey, (ProgramCacheMatchCriteria, u64))> { + ) -> Vec<(Pubkey, ProgramCacheMatchCriteria)> { let fork_graph = cache.fork_graph.as_ref().unwrap().upgrade().unwrap(); let locked_fork_graph = fork_graph.read().unwrap(); let entries = cache.get_flattened_entries_for_tests(); @@ -2193,7 +2187,7 @@ mod tests { ) }) .map(|(program_id, _entry)| { - (*program_id, (ProgramCacheMatchCriteria::NoCriteria, 1)) + (*program_id, ProgramCacheMatchCriteria::NoCriteria) }) }) .collect() @@ -2214,7 +2208,7 @@ mod tests { } fn match_missing( - missing: &[(Pubkey, (ProgramCacheMatchCriteria, u64))], + missing: &[(Pubkey, ProgramCacheMatchCriteria)], program: &Pubkey, expected_result: bool, ) -> bool { @@ -2293,7 +2287,7 @@ mod tests { assert!(match_missing(&missing, &program2, false)); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(22, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut 
extracted, true, true); assert!(match_slot(&extracted, &program1, 20, 22)); assert!(match_slot(&extracted, &program4, 0, 22)); @@ -2302,7 +2296,7 @@ mod tests { get_entries_to_load(&cache, 15, &[program1, program2, program3, program4]); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(15, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 15)); assert!(match_slot(&extracted, &program2, 11, 15)); // The effective slot of program4 deployed in slot 15 is 19. So it should not be usable in slot 16. @@ -2318,7 +2312,7 @@ mod tests { get_entries_to_load(&cache, 18, &[program1, program2, program3, program4]); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(18, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 18)); assert!(match_slot(&extracted, &program2, 11, 18)); // The effective slot of program4 deployed in slot 15 is 18. So it should be usable in slot 18. @@ -2329,7 +2323,7 @@ mod tests { get_entries_to_load(&cache, 23, &[program1, program2, program3, program4]); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(23, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 23)); assert!(match_slot(&extracted, &program2, 11, 23)); // The effective slot of program4 deployed in slot 15 is 19. So it should be usable in slot 23. 
@@ -2340,7 +2334,7 @@ mod tests { get_entries_to_load(&cache, 11, &[program1, program2, program3, program4]); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(11, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 11)); // program2 was updated at slot 11, but is not effective till slot 12. The result should contain a tombstone. let tombstone = extracted @@ -2372,7 +2366,7 @@ mod tests { get_entries_to_load(&cache, 21, &[program1, program2, program3, program4]); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(21, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); // Since the fork was pruned, we should not find the entry deployed at slot 20. assert!(match_slot(&extracted, &program1, 0, 21)); assert!(match_slot(&extracted, &program2, 11, 21)); @@ -2382,7 +2376,7 @@ mod tests { let mut missing = get_entries_to_load(&cache, 27, &[program1, program2, program3, program4]); let mut extracted = ProgramCacheForTxBatch::new(27, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 27)); assert!(match_slot(&extracted, &program2, 11, 27)); assert!(match_slot(&extracted, &program3, 25, 27)); @@ -2410,7 +2404,7 @@ mod tests { get_entries_to_load(&cache, 23, &[program1, program2, program3, program4]); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(23, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 23)); 
assert!(match_slot(&extracted, &program2, 11, 23)); assert!(match_slot(&extracted, &program4, 15, 23)); @@ -2458,17 +2452,17 @@ mod tests { let mut missing = get_entries_to_load(&cache, 12, &[program1, program2, program3]); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(12, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 12)); assert!(match_slot(&extracted, &program2, 11, 12)); // Test the same fork, but request the program modified at a later slot than what's in the cache. let mut missing = get_entries_to_load(&cache, 12, &[program1, program2, program3]); - missing.get_mut(0).unwrap().1 .0 = ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(5); - missing.get_mut(1).unwrap().1 .0 = ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(5); + missing.get_mut(0).unwrap().1 = ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(5); + missing.get_mut(1).unwrap().1 = ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(5); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(12, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_missing(&missing, &program1, true)); assert!(match_slot(&extracted, &program2, 11, 12)); } @@ -2528,14 +2522,14 @@ mod tests { let mut missing = get_entries_to_load(&cache, 19, &[program1, program2, program3]); assert!(match_missing(&missing, &program3, false)); let mut extracted = ProgramCacheForTxBatch::new(19, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 19)); assert!(match_slot(&extracted, &program2, 11, 19)); // Testing fork 0 - 5 - 11 - 25 - 27 with 
current slot at 27 let mut missing = get_entries_to_load(&cache, 27, &[program1, program2, program3]); let mut extracted = ProgramCacheForTxBatch::new(27, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 27)); assert!(match_slot(&extracted, &program2, 11, 27)); assert!(match_missing(&missing, &program3, true)); @@ -2544,7 +2538,7 @@ mod tests { let mut missing = get_entries_to_load(&cache, 22, &[program1, program2, program3]); assert!(match_missing(&missing, &program2, false)); let mut extracted = ProgramCacheForTxBatch::new(22, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 20, 22)); assert!(match_missing(&missing, &program3, true)); } @@ -2557,9 +2551,9 @@ mod tests { cache.set_fork_graph(Arc::downgrade(&fork_graph)); let program1 = Pubkey::new_unique(); - let mut missing = vec![(program1, (ProgramCacheMatchCriteria::NoCriteria, 1))]; + let mut missing = vec![(program1, ProgramCacheMatchCriteria::NoCriteria)]; let mut extracted = ProgramCacheForTxBatch::new(0, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_missing(&missing, &program1, true)); } @@ -2580,8 +2574,7 @@ mod tests { account_size: 0, deployment_slot: 0, effective_slot: 0, - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), + tx_usage_counter: Arc::default(), latest_access_slot: AtomicU64::default(), }); assert!(entry.to_unloaded().is_none()); @@ -2636,7 +2629,7 @@ mod tests { let mut missing = get_entries_to_load(&cache, 20, &[program1]); let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut 
extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); // The cache should have the program deployed at slot 0 assert_eq!( @@ -2677,14 +2670,14 @@ mod tests { let mut missing = get_entries_to_load(&cache, 20, &[program1, program2]); let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_slot(&extracted, &program2, 10, 20)); let mut missing = get_entries_to_load(&cache, 6, &[program1, program2]); assert!(match_missing(&missing, &program2, false)); let mut extracted = ProgramCacheForTxBatch::new(6, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 5, 6)); // Pruning slot 5 will remove program1 entry deployed at slot 5. @@ -2693,14 +2686,14 @@ mod tests { let mut missing = get_entries_to_load(&cache, 20, &[program1, program2]); let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_slot(&extracted, &program2, 10, 20)); let mut missing = get_entries_to_load(&cache, 6, &[program1, program2]); assert!(match_missing(&missing, &program2, false)); let mut extracted = ProgramCacheForTxBatch::new(6, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 6)); // Pruning slot 10 will remove program2 entry deployed at slot 10. 
@@ -2710,7 +2703,7 @@ mod tests { let mut missing = get_entries_to_load(&cache, 20, &[program1, program2]); assert!(match_missing(&missing, &program2, false)); let mut extracted = ProgramCacheForTxBatch::new(20, cache.environments.clone(), None, 0); - cache.extract(&mut missing, &mut extracted, true); + cache.extract(&mut missing, &mut extracted, true, true); assert!(match_slot(&extracted, &program1, 0, 20)); } diff --git a/program-runtime/src/memory.rs b/program-runtime/src/memory.rs new file mode 100644 index 00000000000000..b248c24298a2fa --- /dev/null +++ b/program-runtime/src/memory.rs @@ -0,0 +1,138 @@ +//! Memory translation utilities. + +use { + solana_sbpf::memory_region::{AccessType, MemoryMapping}, + solana_transaction_context::vm_slice::VmSlice, + std::{mem::align_of, slice::from_raw_parts_mut}, +}; + +/// Error types for memory translation operations. +#[derive(Debug, thiserror::Error, PartialEq, Eq)] +pub enum MemoryTranslationError { + #[error("Unaligned pointer")] + UnalignedPointer, + #[error("Invalid length")] + InvalidLength, +} + +pub fn address_is_aligned(address: u64) -> bool { + (address as *mut T as usize) + .checked_rem(align_of::()) + .map(|rem| rem == 0) + .expect("T to be non-zero aligned") +} + +// Do not use this directly +#[macro_export] +macro_rules! translate_inner { + ($memory_mapping:expr, $map:ident, $access_type:expr, $vm_addr:expr, $len:expr $(,)?) => { + Result::>::from( + $memory_mapping + .$map($access_type, $vm_addr, $len) + .map_err(|err| err.into()), + ) + }; +} + +// Do not use this directly +#[macro_export] +macro_rules! translate_type_inner { + ($memory_mapping:expr, $access_type:expr, $vm_addr:expr, $T:ty, $check_aligned:expr $(,)?) 
=> {{ + let host_addr = $crate::translate_inner!( + $memory_mapping, + map, + $access_type, + $vm_addr, + size_of::<$T>() as u64 + )?; + if !$check_aligned { + Ok(unsafe { std::mem::transmute::(host_addr) }) + } else if !$crate::memory::address_is_aligned::<$T>(host_addr) { + Err($crate::memory::MemoryTranslationError::UnalignedPointer.into()) + } else { + Ok(unsafe { &mut *(host_addr as *mut $T) }) + } + }}; +} + +// Do not use this directly +#[macro_export] +macro_rules! translate_slice_inner { + ($memory_mapping:expr, $access_type:expr, $vm_addr:expr, $len:expr, $T:ty, $check_aligned:expr $(,)?) => {{ + if $len == 0 { + return Ok(&mut []); + } + let total_size = $len.saturating_mul(size_of::<$T>() as u64); + if isize::try_from(total_size).is_err() { + return Err($crate::memory::MemoryTranslationError::InvalidLength.into()); + } + let host_addr = + $crate::translate_inner!($memory_mapping, map, $access_type, $vm_addr, total_size)?; + if $check_aligned && !$crate::memory::address_is_aligned::<$T>(host_addr) { + return Err($crate::memory::MemoryTranslationError::UnalignedPointer.into()); + } + Ok(unsafe { from_raw_parts_mut(host_addr as *mut $T, $len as usize) }) + }}; +} + +pub fn translate_type<'a, T>( + memory_mapping: &MemoryMapping, + vm_addr: u64, + check_aligned: bool, +) -> Result<&'a T, Box> { + translate_type_inner!(memory_mapping, AccessType::Load, vm_addr, T, check_aligned) + .map(|value| &*value) +} + +pub fn translate_slice<'a, T>( + memory_mapping: &MemoryMapping, + vm_addr: u64, + len: u64, + check_aligned: bool, +) -> Result<&'a [T], Box> { + translate_slice_inner!( + memory_mapping, + AccessType::Load, + vm_addr, + len, + T, + check_aligned, + ) + .map(|value| &*value) +} + +/// CPI-specific version with intentionally different lifetime signature. +/// This version is missing lifetime 'a of the return type in the parameter &MemoryMapping. 
+pub fn translate_type_mut_for_cpi<'a, T>( + memory_mapping: &MemoryMapping, + vm_addr: u64, + check_aligned: bool, +) -> Result<&'a mut T, Box> { + translate_type_inner!(memory_mapping, AccessType::Store, vm_addr, T, check_aligned) +} + +/// CPI-specific version with intentionally different lifetime signature. +/// This version is missing lifetime 'a of the return type in the parameter &MemoryMapping. +pub fn translate_slice_mut_for_cpi<'a, T>( + memory_mapping: &MemoryMapping, + vm_addr: u64, + len: u64, + check_aligned: bool, +) -> Result<&'a mut [T], Box> { + translate_slice_inner!( + memory_mapping, + AccessType::Store, + vm_addr, + len, + T, + check_aligned, + ) +} + +pub fn translate_vm_slice<'a, T>( + slice: &VmSlice, + memory_mapping: &'a MemoryMapping, + check_aligned: bool, +) -> Result<&'a [T], Box> { + translate_slice::(memory_mapping, slice.ptr(), slice.len(), check_aligned) +} diff --git a/program-runtime/src/serialization.rs b/program-runtime/src/serialization.rs index eb3e3c99d83afd..a535680c5aa28e 100644 --- a/program-runtime/src/serialization.rs +++ b/program-runtime/src/serialization.rs @@ -13,18 +13,15 @@ use { solana_sdk_ids::bpf_loader_deprecated, solana_system_interface::MAX_PERMITTED_DATA_LENGTH, solana_transaction_context::{ - BorrowedAccount, IndexOfAccount, InstructionContext, TransactionContext, + BorrowedInstructionAccount, IndexOfAccount, InstructionContext, + MAX_ACCOUNTS_PER_INSTRUCTION, }, std::mem::{self, size_of}, }; -/// Maximum number of instruction accounts that can be serialized into the -/// SBF VM. 
-const MAX_INSTRUCTION_ACCOUNTS: u8 = NON_DUP_MARKER; - /// Modifies the memory mapping in serialization and CPI return for stricter_abi_and_runtime_constraints pub fn modify_memory_region_of_account( - account: &mut BorrowedAccount<'_>, + account: &mut BorrowedInstructionAccount<'_>, region: &mut MemoryRegion, ) { region.len = account.get_data().len() as u64; @@ -39,7 +36,7 @@ pub fn modify_memory_region_of_account( /// Creates the memory mapping in serialization and CPI return for account_data_direct_mapping pub fn create_memory_region_of_account( - account: &mut BorrowedAccount<'_>, + account: &mut BorrowedInstructionAccount<'_>, vaddr: u64, ) -> Result { let can_data_be_changed = account.can_data_be_changed().is_ok(); @@ -56,7 +53,7 @@ pub fn create_memory_region_of_account( #[allow(dead_code)] enum SerializeAccount<'a> { - Account(IndexOfAccount, BorrowedAccount<'a>), + Account(IndexOfAccount, BorrowedInstructionAccount<'a>), Duplicate(IndexOfAccount), } @@ -130,7 +127,7 @@ impl Serializer { fn write_account( &mut self, - account: &mut BorrowedAccount<'_>, + account: &mut BorrowedInstructionAccount<'_>, ) -> Result { if !self.stricter_abi_and_runtime_constraints { let vm_data_addr = self.vaddr.saturating_add(self.buffer.len() as u64); @@ -223,7 +220,6 @@ impl Serializer { } pub fn serialize_parameters( - transaction_context: &TransactionContext, instruction_context: &InstructionContext, stricter_abi_and_runtime_constraints: bool, account_data_direct_mapping: bool, @@ -233,22 +229,18 @@ pub fn serialize_parameters( AlignedMemory, Vec, Vec, + usize, ), InstructionError, > { let num_ix_accounts = instruction_context.get_number_of_instruction_accounts(); - if num_ix_accounts > MAX_INSTRUCTION_ACCOUNTS as IndexOfAccount { + if num_ix_accounts > MAX_ACCOUNTS_PER_INSTRUCTION as IndexOfAccount { return Err(InstructionError::MaxAccountsExceeded); } - let (program_id, is_loader_deprecated) = { - let program_account = - 
instruction_context.try_borrow_last_program_account(transaction_context)?; - ( - *program_account.get_key(), - *program_account.get_owner() == bpf_loader_deprecated::id(), - ) - }; + let program_id = *instruction_context.get_program_key()?; + let is_loader_deprecated = + instruction_context.get_program_owner()? == bpf_loader_deprecated::id(); let accounts = (0..instruction_context.get_number_of_instruction_accounts()) .map(|instruction_account_index| { @@ -259,7 +251,7 @@ pub fn serialize_parameters( SerializeAccount::Duplicate(index) } else { let account = instruction_context - .try_borrow_instruction_account(transaction_context, instruction_account_index) + .try_borrow_instruction_account(instruction_account_index) .unwrap(); SerializeAccount::Account(instruction_account_index, account) } @@ -292,21 +284,17 @@ pub fn serialize_parameters( } pub fn deserialize_parameters( - transaction_context: &TransactionContext, instruction_context: &InstructionContext, stricter_abi_and_runtime_constraints: bool, account_data_direct_mapping: bool, buffer: &[u8], accounts_metadata: &[SerializedAccountMetadata], ) -> Result<(), InstructionError> { - let is_loader_deprecated = *instruction_context - .try_borrow_last_program_account(transaction_context)? - .get_owner() - == bpf_loader_deprecated::id(); + let is_loader_deprecated = + instruction_context.get_program_owner()? 
== bpf_loader_deprecated::id(); let account_lengths = accounts_metadata.iter().map(|a| a.original_data_len); if is_loader_deprecated { deserialize_parameters_unaligned( - transaction_context, instruction_context, stricter_abi_and_runtime_constraints, account_data_direct_mapping, @@ -315,7 +303,6 @@ pub fn deserialize_parameters( ) } else { deserialize_parameters_aligned( - transaction_context, instruction_context, stricter_abi_and_runtime_constraints, account_data_direct_mapping, @@ -337,6 +324,7 @@ fn serialize_parameters_unaligned( AlignedMemory, Vec, Vec, + usize, ), InstructionError, > { @@ -409,15 +397,19 @@ fn serialize_parameters_unaligned( }; } s.write::((instruction_data.len() as u64).to_le()); - s.write_all(instruction_data); + let instruction_data_offset = s.write_all(instruction_data); s.write_all(program_id.as_ref()); let (mem, regions) = s.finish(); - Ok((mem, regions, accounts_metadata)) + Ok(( + mem, + regions, + accounts_metadata, + instruction_data_offset as usize, + )) } fn deserialize_parameters_unaligned>( - transaction_context: &TransactionContext, instruction_context: &InstructionContext, stricter_abi_and_runtime_constraints: bool, account_data_direct_mapping: bool, @@ -433,8 +425,8 @@ fn deserialize_parameters_unaligned>( instruction_context.is_instruction_account_duplicate(instruction_account_index)?; start += 1; // is_dup if duplicate.is_none() { - let mut borrowed_account = instruction_context - .try_borrow_instruction_account(transaction_context, instruction_account_index)?; + let mut borrowed_account = + instruction_context.try_borrow_instruction_account(instruction_account_index)?; start += size_of::(); // is_signer start += size_of::(); // is_writable start += size_of::(); // key @@ -491,6 +483,7 @@ fn serialize_parameters_aligned( AlignedMemory, Vec, Vec, + usize, ), InstructionError, > { @@ -572,15 +565,19 @@ fn serialize_parameters_aligned( }; } s.write::((instruction_data.len() as u64).to_le()); - s.write_all(instruction_data); + 
let instruction_data_offset = s.write_all(instruction_data); s.write_all(program_id.as_ref()); let (mem, regions) = s.finish(); - Ok((mem, regions, accounts_metadata)) + Ok(( + mem, + regions, + accounts_metadata, + instruction_data_offset as usize, + )) } fn deserialize_parameters_aligned>( - transaction_context: &TransactionContext, instruction_context: &InstructionContext, stricter_abi_and_runtime_constraints: bool, account_data_direct_mapping: bool, @@ -598,8 +595,8 @@ fn deserialize_parameters_aligned>( if duplicate.is_some() { start += 7; // padding to 64-bit aligned } else { - let mut borrowed_account = instruction_context - .try_borrow_instruction_account(transaction_context, instruction_account_index)?; + let mut borrowed_account = + instruction_context.try_borrow_instruction_account(instruction_account_index)?; start += size_of::() // is_signer + size_of::() // is_writable + size_of::() // executable @@ -675,14 +672,16 @@ mod tests { use { super::*, crate::with_mock_invoke_context, - solana_account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, + solana_account::{Account, AccountSharedData, ReadableAccount}, solana_account_info::AccountInfo, solana_program_entrypoint::deserialize, solana_rent::Rent, solana_sbpf::{memory_region::MemoryMapping, program::SBPFVersion, vm::Config}, solana_sdk_ids::bpf_loader, solana_system_interface::MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION, - solana_transaction_context::InstructionAccount, + solana_transaction_context::{ + InstructionAccount, TransactionContext, MAX_ACCOUNTS_PER_TRANSACTION, + }, std::{ cell::RefCell, mem::transmute, @@ -699,15 +698,8 @@ mod tests { .iter() .enumerate() .map(|(index_in_instruction, index_in_transaction)| { - let index_in_callee = transaction_indexes - .get(0..index_in_instruction) - .unwrap() - .iter() - .position(|account_index| account_index == index_in_transaction) - .unwrap_or(index_in_instruction); InstructionAccount::new( *index_in_transaction, - 
index_in_callee as IndexOfAccount, false, is_writable(index_in_instruction), ) @@ -733,19 +725,19 @@ mod tests { } in [ TestCase { name: "serialize max accounts with cap", - num_ix_accounts: usize::from(MAX_INSTRUCTION_ACCOUNTS), + num_ix_accounts: MAX_ACCOUNTS_PER_INSTRUCTION, append_dup_account: false, expected_err: None, }, TestCase { name: "serialize too many accounts with cap", - num_ix_accounts: usize::from(MAX_INSTRUCTION_ACCOUNTS) + 1, + num_ix_accounts: MAX_ACCOUNTS_PER_INSTRUCTION + 1, append_dup_account: false, expected_err: Some(InstructionError::MaxAccountsExceeded), }, TestCase { name: "serialize too many accounts and append dup with cap", - num_ix_accounts: usize::from(MAX_INSTRUCTION_ACCOUNTS), + num_ix_accounts: MAX_ACCOUNTS_PER_INSTRUCTION, append_dup_account: true, expected_err: Some(InstructionError::MaxAccountsExceeded), }, @@ -775,13 +767,12 @@ mod tests { } let transaction_accounts_indexes: Vec = - (1..(num_ix_accounts + 1) as u16).collect(); + (0..num_ix_accounts as u16).collect(); let mut instruction_accounts = deduplicated_instruction_accounts(&transaction_accounts_indexes, |_| false); if append_dup_account { instruction_accounts.push(instruction_accounts.last().cloned().unwrap()); } - let program_indices = vec![0]; let instruction_data = vec![]; with_mock_invoke_context!( @@ -789,11 +780,30 @@ mod tests { transaction_context, transaction_accounts ); - invoke_context - .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(program_indices, instruction_accounts, &instruction_data); + if instruction_accounts.len() > MAX_ACCOUNTS_PER_INSTRUCTION { + // Special case implementation of configure_next_instruction_for_tests() + // which avoids the overflow when constructing the dedup_map + // by simply not filling it. 
+ let dedup_map = vec![u8::MAX; MAX_ACCOUNTS_PER_TRANSACTION]; + invoke_context + .transaction_context + .configure_next_instruction( + 0, + instruction_accounts, + dedup_map, + &instruction_data, + ) + .unwrap(); + } else { + invoke_context + .transaction_context + .configure_next_instruction_for_tests( + 0, + instruction_accounts, + &instruction_data, + ) + .unwrap(); + } invoke_context.push().unwrap(); let instruction_context = invoke_context .transaction_context @@ -801,8 +811,7 @@ mod tests { .unwrap(); let serialization_result = serialize_parameters( - invoke_context.transaction_context, - instruction_context, + &instruction_context, stricter_abi_and_runtime_constraints, false, // account_data_direct_mapping true, // mask_out_rent_epoch_in_vm_serialization @@ -816,7 +825,8 @@ mod tests { continue; } - let (mut serialized, regions, _account_lengths) = serialization_result.unwrap(); + let (mut serialized, regions, _account_lengths, _instruction_data_offset) = + serialization_result.unwrap(); let mut serialized_regions = concat_regions(®ions); let (de_program_id, de_accounts, de_instruction_data) = unsafe { deserialize( @@ -845,7 +855,11 @@ mod tests { assert_eq!(account.data(), &account_info.data.borrow()[..]); assert_eq!(account.owner(), account_info.owner); assert_eq!(account.executable(), account_info.executable); - assert_eq!(u64::MAX, account_info.rent_epoch); + #[allow(deprecated)] + { + // Using the sdk entrypoint, the rent-epoch is skipped + assert_eq!(0, account_info._unused); + } } } } @@ -926,18 +940,30 @@ mod tests { rent_epoch: 3100, }), ), + ( + program_id, + AccountSharedData::from(Account { + lamports: 0, + data: vec![], + owner: bpf_loader_deprecated::id(), + executable: true, + rent_epoch: 0, + }), + ), ]; let instruction_accounts = deduplicated_instruction_accounts(&[1, 1, 2, 3, 4, 4, 5, 6], |index| index >= 4); let instruction_data = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; - let program_indices = vec![0]; - let mut original_accounts = 
transaction_accounts.clone(); + let original_accounts = transaction_accounts.clone(); with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(program_indices, instruction_accounts, &instruction_data); + .configure_next_instruction_for_tests( + 0, + instruction_accounts.clone(), + &instruction_data, + ) + .unwrap(); invoke_context.push().unwrap(); let instruction_context = invoke_context .transaction_context @@ -945,14 +971,14 @@ mod tests { .unwrap(); // check serialize_parameters_aligned - let (mut serialized, regions, accounts_metadata) = serialize_parameters( - invoke_context.transaction_context, - instruction_context, - stricter_abi_and_runtime_constraints, - false, // account_data_direct_mapping - true, // mask_out_rent_epoch_in_vm_serialization - ) - .unwrap(); + let (mut serialized, regions, accounts_metadata, _instruction_data_offset) = + serialize_parameters( + &instruction_context, + stricter_abi_and_runtime_constraints, + false, // account_data_direct_mapping + true, // mask_out_rent_epoch_in_vm_serialization + ) + .unwrap(); let mut serialized_regions = concat_regions(®ions); if !stricter_abi_and_runtime_constraints { @@ -990,7 +1016,11 @@ mod tests { assert_eq!(account.data(), &account_info.data.borrow()[..]); assert_eq!(account.owner(), account_info.owner); assert_eq!(account.executable(), account_info.executable); - assert_eq!(u64::MAX, account_info.rent_epoch); + #[allow(deprecated)] + { + // Using the sdk entrypoint, the rent-epoch is skipped + assert_eq!(0, account_info._unused); + } assert_eq!( (*account_info.lamports.borrow() as *const u64).align_offset(BPF_ALIGN_OF_U128), @@ -1007,8 +1037,7 @@ mod tests { } deserialize_parameters( - invoke_context.transaction_context, - instruction_context, + &instruction_context, stricter_abi_and_runtime_constraints, false, // account_data_direct_mapping serialized.as_slice(), @@ 
-1027,27 +1056,24 @@ mod tests { } // check serialize_parameters_unaligned - original_accounts - .first_mut() - .unwrap() - .1 - .set_owner(bpf_loader_deprecated::id()); invoke_context .transaction_context - .get_account_at_index(0) - .unwrap() - .try_borrow_mut() - .unwrap() - .set_owner(bpf_loader_deprecated::id()); + .configure_next_instruction_for_tests(7, instruction_accounts, &instruction_data) + .unwrap(); + invoke_context.push().unwrap(); + let instruction_context = invoke_context + .transaction_context + .get_current_instruction_context() + .unwrap(); - let (mut serialized, regions, account_lengths) = serialize_parameters( - invoke_context.transaction_context, - instruction_context, - stricter_abi_and_runtime_constraints, - false, // account_data_direct_mapping - true, // mask_out_rent_epoch_in_vm_serialization - ) - .unwrap(); + let (mut serialized, regions, account_lengths, _instruction_data_offset) = + serialize_parameters( + &instruction_context, + stricter_abi_and_runtime_constraints, + false, // account_data_direct_mapping + true, // mask_out_rent_epoch_in_vm_serialization + ) + .unwrap(); let mut serialized_regions = concat_regions(®ions); let (de_program_id, de_accounts, de_instruction_data) = unsafe { @@ -1077,12 +1103,14 @@ mod tests { assert_eq!(account.data(), &account_info.data.borrow()[..]); assert_eq!(account.owner(), account_info.owner); assert_eq!(account.executable(), account_info.executable); - assert_eq!(u64::MAX, account_info.rent_epoch); + #[allow(deprecated)] + { + assert_eq!(u64::MAX, account_info._unused); + } } deserialize_parameters( - invoke_context.transaction_context, - instruction_context, + &instruction_context, stricter_abi_and_runtime_constraints, false, // account_data_direct_mapping serialized.as_slice(), @@ -1176,18 +1204,29 @@ mod tests { rent_epoch: 3100, }), ), + ( + solana_pubkey::new_rand(), + AccountSharedData::from(Account { + lamports: 0, + data: vec![], + owner: bpf_loader_deprecated::id(), + executable: true, 
+ rent_epoch: 0, + }), + ), ]; let instruction_accounts = deduplicated_instruction_accounts(&[1, 1, 2, 3, 4, 4, 5, 6], |index| index >= 4); let instruction_data = vec![]; - let program_indices = vec![0]; - let mut original_accounts = transaction_accounts.clone(); with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(program_indices, instruction_accounts, &instruction_data); + .configure_next_instruction_for_tests( + 0, + instruction_accounts.clone(), + &instruction_data, + ) + .unwrap(); invoke_context.push().unwrap(); let instruction_context = invoke_context .transaction_context @@ -1195,14 +1234,14 @@ mod tests { .unwrap(); // check serialize_parameters_aligned - let (_serialized, regions, _accounts_metadata) = serialize_parameters( - invoke_context.transaction_context, - instruction_context, - true, - false, // account_data_direct_mapping - mask_out_rent_epoch_in_vm_serialization, - ) - .unwrap(); + let (_serialized, regions, _accounts_metadata, _instruction_data_offset) = + serialize_parameters( + &instruction_context, + true, + false, // account_data_direct_mapping + mask_out_rent_epoch_in_vm_serialization, + ) + .unwrap(); let mut serialized_regions = concat_regions(®ions); let (_de_program_id, de_accounts, _de_instruction_data) = unsafe { @@ -1210,45 +1249,32 @@ mod tests { }; for account_info in de_accounts { - let index_in_transaction = invoke_context - .transaction_context - .find_index_of_account(account_info.key) - .unwrap(); - let account = invoke_context - .transaction_context - .accounts() - .try_borrow(index_in_transaction) - .unwrap(); - let expected_rent_epoch = if mask_out_rent_epoch_in_vm_serialization { - u64::MAX - } else { - account.rent_epoch() - }; - assert_eq!(expected_rent_epoch, account_info.rent_epoch); + // Using program-entrypoint, the rent-epoch will always be 0 + #[allow(deprecated)] + { + assert_eq!(0, 
account_info._unused); + } } // check serialize_parameters_unaligned - original_accounts - .first_mut() - .unwrap() - .1 - .set_owner(bpf_loader_deprecated::id()); invoke_context .transaction_context - .get_account_at_index(0) - .unwrap() - .try_borrow_mut() - .unwrap() - .set_owner(bpf_loader_deprecated::id()); + .configure_next_instruction_for_tests(7, instruction_accounts, &instruction_data) + .unwrap(); + invoke_context.push().unwrap(); + let instruction_context = invoke_context + .transaction_context + .get_current_instruction_context() + .unwrap(); - let (_serialized, regions, _account_lengths) = serialize_parameters( - invoke_context.transaction_context, - instruction_context, - true, - false, // account_data_direct_mapping - mask_out_rent_epoch_in_vm_serialization, - ) - .unwrap(); + let (_serialized, regions, _account_lengths, _instruction_data_offset) = + serialize_parameters( + &instruction_context, + true, + false, // account_data_direct_mapping + mask_out_rent_epoch_in_vm_serialization, + ) + .unwrap(); let mut serialized_regions = concat_regions(®ions); let (_de_program_id, de_accounts, _de_instruction_data) = unsafe { @@ -1271,7 +1297,10 @@ mod tests { } else { account.rent_epoch() }; - assert_eq!(expected_rent_epoch, account_info.rent_epoch); + #[allow(deprecated)] + { + assert_eq!(expected_rent_epoch, account_info._unused); + } } } } @@ -1364,9 +1393,10 @@ mod tests { let executable = Ptr::::read_possibly_unaligned(input, offset) != 0; offset += size_of::(); - let rent_epoch = Ptr::::read_possibly_unaligned(input, offset); + let unused = Ptr::::read_possibly_unaligned(input, offset); offset += size_of::(); + #[allow(deprecated)] accounts.push(AccountInfo { key, is_signer, @@ -1375,7 +1405,7 @@ mod tests { data, owner, executable, - rent_epoch, + _unused: unused, }); } else { // duplicate account, clone the original @@ -1450,15 +1480,13 @@ mod tests { /* max_instruction_stack_depth */ 1, /* max_instruction_trace_length */ 1, ); - let program_indices 
= vec![6]; let transaction_accounts_indexes = [0, 1, 2, 3, 4, 5]; let instruction_accounts = deduplicated_instruction_accounts(&transaction_accounts_indexes, |index| index > 0); let instruction_data = []; transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(program_indices, instruction_accounts, &instruction_data); + .configure_next_instruction_for_tests(6, instruction_accounts, &instruction_data) + .unwrap(); transaction_context.push().unwrap(); let instruction_context = transaction_context .get_current_instruction_context() @@ -1475,10 +1503,7 @@ mod tests { .map(|(index_in_instruction, account_start_offset)| { create_memory_region_of_account( &mut instruction_context - .try_borrow_instruction_account( - &transaction_context, - index_in_instruction as IndexOfAccount, - ) + .try_borrow_instruction_account(index_in_instruction as IndexOfAccount) .unwrap(), *account_start_offset, ) @@ -1598,14 +1623,14 @@ mod tests { let remaining_allowed_growth: usize = 0x700; for index_in_instruction in 4..6 { let mut borrowed_account = instruction_context - .try_borrow_instruction_account(&transaction_context, index_in_instruction) + .try_borrow_instruction_account(index_in_instruction) .unwrap(); borrowed_account - .set_data(vec![0u8; MAX_PERMITTED_DATA_LENGTH as usize]) + .set_data_from_slice(&vec![0u8; MAX_PERMITTED_DATA_LENGTH as usize]) .unwrap(); } assert_eq!( - transaction_context.accounts_resize_delta().unwrap(), + transaction_context.accounts().resize_delta(), MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION - remaining_allowed_growth as i64, ); diff --git a/program-runtime/src/stable_log.rs b/program-runtime/src/stable_log.rs index 777247f29e3db0..e1591fb2ea8f2f 100644 --- a/program-runtime/src/stable_log.rs +++ b/program-runtime/src/stable_log.rs @@ -5,8 +5,8 @@ use { base64::{prelude::BASE64_STANDARD, Engine}, itertools::Itertools, - solana_log_collector::{ic_logger_msg, LogCollector}, solana_pubkey::Pubkey, + 
solana_svm_log_collector::{ic_logger_msg, LogCollector}, std::{cell::RefCell, rc::Rc}, }; diff --git a/program-runtime/src/sysvar_cache.rs b/program-runtime/src/sysvar_cache.rs index 19ad891a0904e4..8ce92b1b0ee369 100644 --- a/program-runtime/src/sysvar_cache.rs +++ b/program-runtime/src/sysvar_cache.rs @@ -12,10 +12,11 @@ use { solana_rent::Rent, solana_sdk_ids::sysvar, solana_slot_hashes::SlotHashes, - solana_sysvar::{stake_history::StakeHistory, Sysvar}, + solana_stake_interface::stake_history::StakeHistory, + solana_svm_type_overrides::sync::Arc, + solana_sysvar::SysvarSerialize, solana_sysvar_id::SysvarId, - solana_transaction_context::{IndexOfAccount, InstructionContext, TransactionContext}, - solana_type_overrides::sync::Arc, + solana_transaction_context::{IndexOfAccount, InstructionContext}, }; #[cfg(feature = "frozen-abi")] @@ -59,7 +60,7 @@ const RECENT_BLOCKHASHES_ID: Pubkey = impl SysvarCache { /// Overwrite a sysvar. For testing purposes only. #[allow(deprecated)] - pub fn set_sysvar_for_tests(&mut self, sysvar: &T) { + pub fn set_sysvar_for_tests(&mut self, sysvar: &T) { let data = bincode::serialize(sysvar).expect("Failed to serialize sysvar."); let sysvar_id = T::id(); match sysvar_id { @@ -283,14 +284,13 @@ impl SysvarCache { pub mod get_sysvar_with_account_check { use super::*; - fn check_sysvar_account( - transaction_context: &TransactionContext, + fn check_sysvar_account( instruction_context: &InstructionContext, instruction_account_index: IndexOfAccount, ) -> Result<(), InstructionError> { - let index_in_transaction = instruction_context - .get_index_of_instruction_account_in_transaction(instruction_account_index)?; - if !S::check_id(transaction_context.get_key_of_account_at_index(index_in_transaction)?) 
{ + if !S::check_id( + instruction_context.get_key_of_instruction_account(instruction_account_index)?, + ) { return Err(InstructionError::InvalidArgument); } Ok(()) @@ -301,11 +301,7 @@ pub mod get_sysvar_with_account_check { instruction_context: &InstructionContext, instruction_account_index: IndexOfAccount, ) -> Result, InstructionError> { - check_sysvar_account::( - invoke_context.transaction_context, - instruction_context, - instruction_account_index, - )?; + check_sysvar_account::(instruction_context, instruction_account_index)?; invoke_context.get_sysvar_cache().get_clock() } @@ -314,11 +310,7 @@ pub mod get_sysvar_with_account_check { instruction_context: &InstructionContext, instruction_account_index: IndexOfAccount, ) -> Result, InstructionError> { - check_sysvar_account::( - invoke_context.transaction_context, - instruction_context, - instruction_account_index, - )?; + check_sysvar_account::(instruction_context, instruction_account_index)?; invoke_context.get_sysvar_cache().get_rent() } @@ -327,11 +319,7 @@ pub mod get_sysvar_with_account_check { instruction_context: &InstructionContext, instruction_account_index: IndexOfAccount, ) -> Result, InstructionError> { - check_sysvar_account::( - invoke_context.transaction_context, - instruction_context, - instruction_account_index, - )?; + check_sysvar_account::(instruction_context, instruction_account_index)?; invoke_context.get_sysvar_cache().get_slot_hashes() } @@ -341,11 +329,7 @@ pub mod get_sysvar_with_account_check { instruction_context: &InstructionContext, instruction_account_index: IndexOfAccount, ) -> Result, InstructionError> { - check_sysvar_account::( - invoke_context.transaction_context, - instruction_context, - instruction_account_index, - )?; + check_sysvar_account::(instruction_context, instruction_account_index)?; invoke_context.get_sysvar_cache().get_recent_blockhashes() } @@ -354,11 +338,7 @@ pub mod get_sysvar_with_account_check { instruction_context: &InstructionContext, 
instruction_account_index: IndexOfAccount, ) -> Result, InstructionError> { - check_sysvar_account::( - invoke_context.transaction_context, - instruction_context, - instruction_account_index, - )?; + check_sysvar_account::(instruction_context, instruction_account_index)?; invoke_context.get_sysvar_cache().get_stake_history() } @@ -367,11 +347,7 @@ pub mod get_sysvar_with_account_check { instruction_context: &InstructionContext, instruction_account_index: IndexOfAccount, ) -> Result, InstructionError> { - check_sysvar_account::( - invoke_context.transaction_context, - instruction_context, - instruction_account_index, - )?; + check_sysvar_account::(instruction_context, instruction_account_index)?; invoke_context.get_sysvar_cache().get_last_restart_slot() } } @@ -394,7 +370,7 @@ mod tests { #[test_case(SlotHashes::default(); "slot_hashes")] #[test_case(StakeHistory::default(); "stake_history")] #[test_case(LastRestartSlot::default(); "last_restart_slot")] - fn test_sysvar_cache_preserves_bytes(_: T) { + fn test_sysvar_cache_preserves_bytes(_: T) { let id = T::id(); let size = T::size_of().saturating_mul(2); let in_buf = vec![0; size]; diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index 2ae9c86fab9663..e2dc3bdac743b5 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -25,6 +25,7 @@ solana-banks-client = { workspace = true } solana-banks-interface = { workspace = true } solana-banks-server = { workspace = true } solana-clock = { workspace = true } +solana-cluster-type = { workspace = true } solana-commitment-config = { workspace = true } solana-compute-budget = { workspace = true } solana-epoch-rewards = { workspace = true } @@ -35,12 +36,12 @@ solana-hash = { workspace = true } solana-instruction = { workspace = true } solana-keypair = { workspace = true } solana-loader-v3-interface = { workspace = true } -solana-log-collector = { workspace = true } solana-logger = { workspace = true } solana-message = { workspace = true } solana-msg 
= { workspace = true } solana-native-token = { workspace = true } solana-poh-config = { workspace = true } +solana-program-binaries = { workspace = true } solana-program-entrypoint = { workspace = true } solana-program-error = { workspace = true } solana-program-runtime = { workspace = true } @@ -53,10 +54,11 @@ solana-signer = { workspace = true } solana-stable-layout = { workspace = true } solana-stake-interface = { workspace = true } solana-svm = { workspace = true } +solana-svm-log-collector = { workspace = true } +solana-svm-timings = { workspace = true } solana-system-interface = { workspace = true } solana-sysvar = { workspace = true } solana-sysvar-id = { workspace = true } -solana-timings = { workspace = true } solana-transaction = { workspace = true } solana-transaction-context = { workspace = true } solana-transaction-error = { workspace = true } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 2cf8c37b70f868..baddd95b9fc5d3 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -11,21 +11,23 @@ use { log::*, solana_account::{create_account_shared_data_for_test, Account, AccountSharedData}, solana_account_info::AccountInfo, + solana_accounts_db::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, solana_banks_client::start_client, solana_banks_server::banks_server::start_local_server, solana_clock::{Epoch, Slot}, + solana_cluster_type::ClusterType, solana_compute_budget::compute_budget::ComputeBudget, solana_fee_calculator::{FeeRateGovernor, DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE}, - solana_genesis_config::{ClusterType, GenesisConfig}, + solana_genesis_config::GenesisConfig, solana_hash::Hash, solana_instruction::{ error::{InstructionError, UNSUPPORTED_SYSVAR}, Instruction, }, solana_keypair::Keypair, - solana_log_collector::ic_msg, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_poh_config::PohConfig, + solana_program_binaries as programs, solana_program_entrypoint::{deserialize, 
SUCCESS}, solana_program_error::{ProgramError, ProgramResult}, solana_program_runtime::{ @@ -42,9 +44,10 @@ use { runtime_config::RuntimeConfig, }, solana_signer::Signer, - solana_sysvar::Sysvar, + solana_svm_log_collector::ic_msg, + solana_svm_timings::ExecuteTimings, + solana_sysvar::SysvarSerialize, solana_sysvar_id::SysvarId, - solana_timings::ExecuteTimings, solana_vote_program::vote_state::{self, VoteStateV3, VoteStateVersions}, std::{ cell::RefCell, @@ -76,8 +79,6 @@ pub use { solana_transaction_context::IndexOfAccount, }; -pub mod programs; - /// Errors from the program test environment #[derive(Error, Debug, PartialEq, Eq)] pub enum ProgramTestError { @@ -116,7 +117,7 @@ pub fn invoke_builtin_function( invoke_context.consume_checked(1)?; let log_collector = invoke_context.get_log_collector(); - let program_id = instruction_context.get_last_program_key(transaction_context)?; + let program_id = instruction_context.get_program_key()?; stable_log::program_invoke( &log_collector, program_id, @@ -130,13 +131,13 @@ pub fn invoke_builtin_function( let mask_out_rent_epoch_in_vm_serialization = invoke_context .get_feature_set() .mask_out_rent_epoch_in_vm_serialization; - let (mut parameter_bytes, _regions, _account_lengths) = serialize_parameters( - transaction_context, - instruction_context, - false, // There is no VM so stricter_abi_and_runtime_constraints can not be implemented here - false, // There is no VM so account_data_direct_mapping can not be implemented here - mask_out_rent_epoch_in_vm_serialization, - )?; + let (mut parameter_bytes, _regions, _account_lengths, _instruction_data_offset) = + serialize_parameters( + &instruction_context, + false, // There is no VM so stricter_abi_and_runtime_constraints can not be implemented here + false, // There is no VM so account_data_direct_mapping can not be implemented here + mask_out_rent_epoch_in_vm_serialization, + )?; // Deserialize data back into instruction params let (program_id, account_infos, input) = @@ 
-174,8 +175,7 @@ pub fn invoke_builtin_function( // Commit AccountInfo changes back into KeyedAccounts for i in deduplicated_indices.into_iter() { - let mut borrowed_account = - instruction_context.try_borrow_instruction_account(transaction_context, i)?; + let mut borrowed_account = instruction_context.try_borrow_instruction_account(i)?; if borrowed_account.is_writable() { if let Some(account_info) = account_info_map.get(borrowed_account.get_key()) { if borrowed_account.get_lamports() != account_info.lamports() { @@ -216,7 +216,7 @@ macro_rules! processor { }; } -fn get_sysvar( +fn get_sysvar( sysvar: Result, InstructionError>, var_addr: *mut u8, ) -> u64 { @@ -256,9 +256,7 @@ impl solana_sysvar::program_stubs::SyscallStubs for SyscallStubs { let instruction_context = transaction_context .get_current_instruction_context() .unwrap(); - let caller = instruction_context - .get_last_program_key(transaction_context) - .unwrap(); + let caller = instruction_context.get_program_key().unwrap(); stable_log::program_invoke( &log_collector, @@ -280,11 +278,8 @@ impl solana_sysvar::program_stubs::SyscallStubs for SyscallStubs { let instruction_context = transaction_context .get_current_instruction_context() .unwrap(); - - let next_instruction_accounts = transaction_context - .get_next_instruction_context() - .unwrap() - .instruction_accounts(); + let next_instruction_context = transaction_context.get_next_instruction_context().unwrap(); + let next_instruction_accounts = next_instruction_context.instruction_accounts(); let mut account_indices = Vec::with_capacity(next_instruction_accounts.len()); for instruction_account in next_instruction_accounts.iter() { let account_key = transaction_context @@ -300,7 +295,7 @@ impl solana_sysvar::program_stubs::SyscallStubs for SyscallStubs { .get_index_of_account_in_instruction(instruction_account.index_in_transaction) .unwrap(); let mut borrowed_account = instruction_context - .try_borrow_instruction_account(transaction_context, 
index_in_caller) + .try_borrow_instruction_account(index_in_caller) .unwrap(); if borrowed_account.get_lamports() != account_info.lamports() { borrowed_account @@ -345,7 +340,7 @@ impl solana_sysvar::program_stubs::SyscallStubs for SyscallStubs { .get_index_of_account_in_instruction(index_in_transaction) .unwrap(); let borrowed_account = instruction_context - .try_borrow_instruction_account(transaction_context, index_in_caller) + .try_borrow_instruction_account(index_in_caller) .unwrap(); let account_info = &account_infos[account_info_index]; **account_info.try_borrow_mut_lamports().unwrap() = borrowed_account.get_lamports(); @@ -425,9 +420,7 @@ impl solana_sysvar::program_stubs::SyscallStubs for SyscallStubs { let instruction_context = transaction_context .get_current_instruction_context() .unwrap(); - let caller = *instruction_context - .get_last_program_key(transaction_context) - .unwrap(); + let caller = *instruction_context.get_program_key().unwrap(); transaction_context .set_return_data(caller, data.to_vec()) .unwrap(); @@ -612,7 +605,7 @@ impl ProgramTest { ); } - pub fn add_sysvar_account(&mut self, address: Pubkey, sysvar: &S) { + pub fn add_sysvar_account(&mut self, address: Pubkey, sysvar: &S) { let account = create_account_shared_data_for_test(sysvar); self.add_account(address, account.into()); } @@ -634,8 +627,9 @@ impl ProgramTest { program_name: &'static str, program_id: &Pubkey, ) { - let program_file = find_file(&format!("{program_name}.so")) - .expect("Program file data not available for {program_name} ({program_id})"); + let program_file = find_file(&format!("{program_name}.so")).unwrap_or_else(|| { + panic!("Program file data not available for {program_name} ({program_id})") + }); let elf = read_file(program_file); let program_accounts = programs::bpf_loader_upgradeable_program_accounts(program_id, &elf, &Rent::default()); @@ -801,13 +795,13 @@ impl ProgramTest { }; let bootstrap_validator_pubkey = Pubkey::new_unique(); let 
bootstrap_validator_stake_lamports = - rent.minimum_balance(VoteStateV3::size_of()) + sol_to_lamports(1_000_000.0); + rent.minimum_balance(VoteStateV3::size_of()) + 1_000_000 * LAMPORTS_PER_SOL; let mint_keypair = Keypair::new(); let voting_keypair = Keypair::new(); let mut genesis_config = create_genesis_config_with_leader_ex( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, &mint_keypair.pubkey(), &bootstrap_validator_pubkey, &voting_keypair.pubkey(), @@ -843,7 +837,7 @@ impl ProgramTest { debug!("Payer address: {}", mint_keypair.pubkey()); debug!("Genesis config: {genesis_config}"); - let bank = Bank::new_with_paths( + let bank = Bank::new_from_genesis( &genesis_config, Arc::new(RuntimeConfig { compute_budget: self.compute_max_units.map(|max_units| ComputeBudget { @@ -859,9 +853,7 @@ impl ProgramTest { }), Vec::default(), None, - None, - false, - None, + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, None, Arc::default(), @@ -1108,7 +1100,7 @@ impl ProgramTestContext { for _ in 0..number_of_credits { vote_state.increment_credits(epoch, 1); } - let versioned = VoteStateVersions::new_current(vote_state); + let versioned = VoteStateVersions::new_v3(vote_state); vote_state::to(&versioned, &mut vote_account).unwrap(); bank.store_account(vote_account_address, &vote_account); } @@ -1131,7 +1123,7 @@ impl ProgramTestContext { /// that would be difficult to replicate on a new test cluster. Beware /// that it can be used to create states that would not be reachable /// under normal conditions! 
- pub fn set_sysvar(&self, sysvar: &T) { + pub fn set_sysvar(&self, sysvar: &T) { let bank_forks = self.bank_forks.read().unwrap(); let bank = bank_forks.working_bank(); bank.set_sysvar_for_tests(sysvar); diff --git a/program-test/tests/core_bpf.rs b/program-test/tests/core_bpf.rs index 6961d581dc8da6..516d488d83a7fe 100644 --- a/program-test/tests/core_bpf.rs +++ b/program-test/tests/core_bpf.rs @@ -26,12 +26,13 @@ async fn test_vended_core_bpf_programs() { assert_bpf_program(&context, &solana_sdk_ids::address_lookup_table::id()).await; assert_bpf_program(&context, &solana_sdk_ids::config::id()).await; assert_bpf_program(&context, &solana_sdk_ids::feature::id()).await; + assert_bpf_program(&context, &solana_sdk_ids::stake::id()).await; } #[tokio::test] async fn test_add_core_bpf_program_manually() { - // Core BPF program: Stake. - let program_id = solana_sdk_ids::stake::id(); + // Core BPF program: Vote. + let program_id = solana_sdk_ids::vote::id(); let mut program_test = ProgramTest::default(); program_test.add_upgradeable_program_to_genesis("noop_program", &program_id); diff --git a/program-test/tests/setup.rs b/program-test/tests/setup.rs index 0cdd7d0950a219..a85b738405f951 100644 --- a/program-test/tests/setup.rs +++ b/program-test/tests/setup.rs @@ -67,7 +67,7 @@ pub async fn setup_vote(context: &mut ProgramTestContext) -> Pubkey { }, vote_lamports, vote_instruction::CreateVoteAccountConfig { - space: vote_state::VoteStateVersions::vote_state_size_of(true) as u64, + space: vote_state::VoteStateV3::size_of() as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, )); diff --git a/program-test/tests/spl.rs b/program-test/tests/spl.rs index bf4f6ff079f0bb..06e13b06cb4da4 100644 --- a/program-test/tests/spl.rs +++ b/program-test/tests/spl.rs @@ -1,7 +1,8 @@ use { solana_instruction::{AccountMeta, Instruction}, solana_keypair::Keypair, - solana_program_test::{programs::spl_programs, ProgramTest}, + solana_program_binaries::spl_programs, + 
solana_program_test::ProgramTest, solana_pubkey::Pubkey, solana_sdk_ids::{bpf_loader, bpf_loader_upgradeable}, solana_signer::Signer, diff --git a/program-test/tests/sysvar_last_restart_slot.rs b/program-test/tests/sysvar_last_restart_slot.rs index dd2d64d54cdfc6..79f1d5dad17f0b 100644 --- a/program-test/tests/sysvar_last_restart_slot.rs +++ b/program-test/tests/sysvar_last_restart_slot.rs @@ -7,7 +7,9 @@ use { solana_program_test::{processor, ProgramTest, ProgramTestContext}, solana_pubkey::Pubkey, solana_signer::Signer, - solana_sysvar::{last_restart_slot, last_restart_slot::LastRestartSlot, Sysvar}, + solana_sysvar::{ + last_restart_slot, last_restart_slot::LastRestartSlot, Sysvar, SysvarSerialize, + }, solana_transaction::Transaction, }; diff --git a/program-test/tests/warp.rs b/program-test/tests/warp.rs index 6c8c71e394c34c..2e9ed32ee930ce 100644 --- a/program-test/tests/warp.rs +++ b/program-test/tests/warp.rs @@ -19,14 +19,12 @@ use { solana_signer::Signer, solana_stake_interface::{ instruction as stake_instruction, + stake_history::StakeHistory, state::{StakeActivationStatus, StakeStateV2}, + sysvar::stake_history, }, solana_stake_program::stake_state, - solana_sysvar::{ - clock, - stake_history::{self, StakeHistory}, - Sysvar, - }, + solana_sysvar::{clock, SysvarSerialize}, solana_transaction::Transaction, solana_transaction_error::TransactionError, solana_vote_program::vote_state, diff --git a/programs/bpf-loader-tests/tests/extend_program_ix.rs b/programs/bpf-loader-tests/tests/extend_program_ix.rs index 54363a92a15710..42e218b2c516b4 100644 --- a/programs/bpf-loader-tests/tests/extend_program_ix.rs +++ b/programs/bpf-loader-tests/tests/extend_program_ix.rs @@ -522,7 +522,7 @@ async fn test_extend_program_without_payer() { &mut context, extend_program_checked(&program_address, &upgrade_authority.pubkey(), None, 1024), Some(&upgrade_authority), - InstructionError::NotEnoughAccountKeys, + InstructionError::MissingAccount, "should fail because program data 
has insufficient funds to cover rent", ) .await; diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index 6c8c7deb63a794..5b46ce2955eeea 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -20,9 +20,9 @@ name = "solana_bpf_loader_program" default = ["metrics"] metrics = ["solana-program-runtime/metrics"] shuttle-test = [ - "solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test", "solana-sbpf/shuttle-test", + "solana-svm-type-overrides/shuttle-test", ] svm-internal = [] @@ -36,18 +36,18 @@ solana-clock = { workspace = true } solana-instruction = { workspace = true } solana-loader-v3-interface = { workspace = true, features = ["serde"] } solana-loader-v4-interface = { workspace = true, features = ["bincode"] } -solana-log-collector = { workspace = true } -solana-measure = { workspace = true } solana-packet = { workspace = true } solana-program-entrypoint = { workspace = true } solana-program-runtime = { workspace = true } solana-pubkey = { workspace = true } -solana-sbpf = { workspace = true } +solana-sbpf = { workspace = true, features = ["jit"] } solana-sdk-ids = { workspace = true } solana-svm-feature-set = { workspace = true } +solana-svm-log-collector = { workspace = true } +solana-svm-measure = { workspace = true } +solana-svm-type-overrides = { workspace = true } solana-system-interface = { workspace = true } solana-transaction-context = { workspace = true, features = ["bincode"] } -solana-type-overrides = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/programs/bpf_loader/benches/bpf_loader_upgradeable.rs b/programs/bpf_loader/benches/bpf_loader_upgradeable.rs index 23ee2f26ade4f5..210617d07bbcf5 100644 --- a/programs/bpf_loader/benches/bpf_loader_upgradeable.rs +++ b/programs/bpf_loader/benches/bpf_loader_upgradeable.rs @@ -143,7 +143,7 @@ impl TestSetup { fn run(&self) { mock_process_instruction( &self.loader_address, - Vec::new(), + 
None, &self.instruction_data, self.transaction_accounts.clone(), self.instruction_accounts.clone(), diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs index f603279b133b72..2cf7f8e0ebc554 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -5,7 +5,7 @@ use { solana_pubkey::Pubkey, solana_rent::Rent, solana_sdk_ids::{bpf_loader, bpf_loader_deprecated}, - solana_transaction_context::{IndexOfAccount, InstructionAccount, TransactionContext}, + solana_transaction_context::{InstructionAccount, TransactionContext}, }; fn create_inputs(owner: Pubkey, num_instruction_accounts: usize) -> TransactionContext { @@ -89,13 +89,8 @@ fn create_inputs(owner: Pubkey, num_instruction_accounts: usize) -> TransactionC .take(num_instruction_accounts) .enumerate() { - let index_in_callee = instruction_accounts - .iter() - .position(|account| account.index_in_transaction == index_in_transaction) - .unwrap_or(instruction_account_index) as IndexOfAccount; instruction_accounts.push(InstructionAccount::new( index_in_transaction, - index_in_callee, false, instruction_account_index >= 4, )); @@ -105,9 +100,8 @@ fn create_inputs(owner: Pubkey, num_instruction_accounts: usize) -> TransactionC TransactionContext::new(transaction_accounts, Rent::default(), 1, 1); let instruction_data = vec![1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![0], instruction_accounts, &instruction_data); + .configure_next_instruction_for_tests(0, instruction_accounts, &instruction_data) + .unwrap(); transaction_context.push().unwrap(); transaction_context } @@ -121,8 +115,7 @@ fn bench_serialize_unaligned(c: &mut Criterion) { c.bench_function("serialize_unaligned", |b| { b.iter(|| { let _ = serialize_parameters( - &transaction_context, - instruction_context, + &instruction_context, true, // stricter_abi_and_runtime_constraints true, 
// account_data_direct_mapping true, // mask_out_rent_epoch_in_vm_serialization @@ -140,8 +133,7 @@ fn bench_serialize_unaligned_copy_account_data(c: &mut Criterion) { c.bench_function("serialize_unaligned_copy_account_data", |b| { b.iter(|| { let _ = serialize_parameters( - &transaction_context, - instruction_context, + &instruction_context, false, // stricter_abi_and_runtime_constraints false, // account_data_direct_mapping true, // mask_out_rent_epoch_in_vm_serialization @@ -160,8 +152,7 @@ fn bench_serialize_aligned(c: &mut Criterion) { c.bench_function("serialize_aligned", |b| { b.iter(|| { let _ = serialize_parameters( - &transaction_context, - instruction_context, + &instruction_context, true, // stricter_abi_and_runtime_constraints true, // account_data_direct_mapping true, // mask_out_rent_epoch_in_vm_serialization @@ -180,8 +171,7 @@ fn bench_serialize_aligned_copy_account_data(c: &mut Criterion) { c.bench_function("serialize_aligned_copy_account_data", |b| { b.iter(|| { let _ = serialize_parameters( - &transaction_context, - instruction_context, + &instruction_context, false, // stricter_abi_and_runtime_constraints false, // account_data_direct_mapping true, // mask_out_rent_epoch_in_vm_serialization @@ -200,8 +190,7 @@ fn bench_serialize_unaligned_max_accounts(c: &mut Criterion) { c.bench_function("serialize_unaligned_max_accounts", |b| { b.iter(|| { let _ = serialize_parameters( - &transaction_context, - instruction_context, + &instruction_context, true, // stricter_abi_and_runtime_constraints true, // account_data_direct_mapping true, // mask_out_rent_epoch_in_vm_serialization @@ -220,8 +209,7 @@ fn bench_serialize_aligned_max_accounts(c: &mut Criterion) { c.bench_function("serialize_aligned_max_accounts", |b| { b.iter(|| { let _ = serialize_parameters( - &transaction_context, - instruction_context, + &instruction_context, true, // stricter_abi_and_runtime_constraints true, // account_data_direct_mapping true, // 
mask_out_rent_epoch_in_vm_serialization diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index e06e5c811aabf1..9fcfadc4407406 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -10,8 +10,6 @@ use { solana_loader_v3_interface::{ instruction::UpgradeableLoaderInstruction, state::UpgradeableLoaderState, }, - solana_log_collector::{ic_logger_msg, ic_msg, LogCollector}, - solana_measure::measure::Measure, solana_program_entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, solana_program_runtime::{ execution_budget::MAX_INSTRUCTION_STACK_DEPTH, @@ -38,9 +36,11 @@ use { solana_sdk_ids::{ bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, loader_v4, native_loader, }, + solana_svm_log_collector::{ic_logger_msg, ic_msg, LogCollector}, + solana_svm_measure::measure::Measure, + solana_svm_type_overrides::sync::{atomic::Ordering, Arc}, solana_system_interface::{instruction as system_instruction, MAX_PERMITTED_DATA_LENGTH}, solana_transaction_context::{IndexOfAccount, InstructionContext, TransactionContext}, - solana_type_overrides::sync::{atomic::Ordering, Arc}, std::{cell::RefCell, mem, rc::Rc}, }; @@ -177,10 +177,6 @@ pub fn deploy_program( old_entry.tx_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed, ); - executor.ix_usage_counter.store( - old_entry.ix_usage_counter.load(Ordering::Relaxed), - Ordering::Relaxed, - ); } load_program_metrics.program_id = program_id.to_string(); program_cache_for_tx_batch.store_modified_entry(*program_id, Arc::new(executor)); @@ -219,7 +215,7 @@ fn write_program_data( ) -> Result<(), InstructionError> { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut program = instruction_context.try_borrow_instruction_account(0)?; let data = program.get_data_mut()?; let write_offset = 
program_data_offset.saturating_add(bytes.len()); if data.len() < write_offset { @@ -272,7 +268,7 @@ fn create_vm<'a, 'b>( invoke_context .get_feature_set() .stricter_abi_and_runtime_constraints, - invoke_context.account_data_direct_mapping, + invoke_context.get_feature_set().account_data_direct_mapping, )?; invoke_context.set_syscall_context(SyscallContext { allocator: BpfAllocator::new(heap_size as u64), @@ -385,13 +381,12 @@ pub(crate) fn process_instruction_inner( let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let program_account = - instruction_context.try_borrow_last_program_account(transaction_context)?; + let program_id = instruction_context.get_program_key()?; + let owner_id = instruction_context.get_program_owner()?; // Program Management Instruction - if native_loader::check_id(program_account.get_owner()) { - drop(program_account); - let program_id = instruction_context.get_last_program_key(transaction_context)?; + if native_loader::check_id(&owner_id) { + let program_id = instruction_context.get_program_key()?; return if bpf_loader_upgradeable::check_id(program_id) { invoke_context.consume_checked(UPGRADEABLE_LOADER_COMPUTE_UNITS)?; process_loader_upgradeable_instruction(invoke_context) @@ -408,79 +403,33 @@ pub(crate) fn process_instruction_inner( Err(InstructionError::UnsupportedProgramId) } else { ic_logger_msg!(log_collector, "Invalid BPF loader id"); - Err( - if invoke_context - .get_feature_set() - .remove_accounts_executable_flag_checks - { - InstructionError::UnsupportedProgramId - } else { - InstructionError::IncorrectProgramId - }, - ) + Err(InstructionError::UnsupportedProgramId) } .map(|_| 0) .map_err(|error| Box::new(error) as Box); } // Program Invocation - #[allow(deprecated)] - if !invoke_context - .get_feature_set() - .remove_accounts_executable_flag_checks - && 
!program_account.is_executable() - { - ic_logger_msg!(log_collector, "Program is not executable"); - return Err(Box::new(InstructionError::IncorrectProgramId)); - } - let mut get_or_create_executor_time = Measure::start("get_or_create_executor_time"); let executor = invoke_context .program_cache_for_tx_batch - .find(program_account.get_key()) + .find(program_id) .ok_or_else(|| { ic_logger_msg!(log_collector, "Program is not cached"); - if invoke_context - .get_feature_set() - .remove_accounts_executable_flag_checks - { - InstructionError::UnsupportedProgramId - } else { - InstructionError::InvalidAccountData - } + InstructionError::UnsupportedProgramId })?; - drop(program_account); get_or_create_executor_time.stop(); invoke_context.timings.get_or_create_executor_us += get_or_create_executor_time.as_us(); - executor.ix_usage_counter.fetch_add(1, Ordering::Relaxed); match &executor.program { ProgramCacheEntryType::FailedVerification(_) | ProgramCacheEntryType::Closed | ProgramCacheEntryType::DelayVisibility => { ic_logger_msg!(log_collector, "Program is not deployed"); - let instruction_error = if invoke_context - .get_feature_set() - .remove_accounts_executable_flag_checks - { - InstructionError::UnsupportedProgramId - } else { - InstructionError::InvalidAccountData - }; - Err(Box::new(instruction_error) as Box) + Err(Box::new(InstructionError::UnsupportedProgramId) as Box) } ProgramCacheEntryType::Loaded(executable) => execute(executable, invoke_context), - _ => { - let instruction_error = if invoke_context - .get_feature_set() - .remove_accounts_executable_flag_checks - { - InstructionError::UnsupportedProgramId - } else { - InstructionError::IncorrectProgramId - }; - Err(Box::new(instruction_error) as Box) - } + _ => Err(Box::new(InstructionError::UnsupportedProgramId) as Box), } .map(|_| 0) } @@ -492,22 +441,19 @@ fn process_loader_upgradeable_instruction( let transaction_context = &invoke_context.transaction_context; let instruction_context = 
transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); - let program_id = instruction_context.get_last_program_key(transaction_context)?; + let program_id = instruction_context.get_program_key()?; match limited_deserialize(instruction_data, solana_packet::PACKET_DATA_SIZE as u64)? { UpgradeableLoaderInstruction::InitializeBuffer => { instruction_context.check_number_of_instruction_accounts(2)?; - let mut buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut buffer = instruction_context.try_borrow_instruction_account(0)?; if UpgradeableLoaderState::Uninitialized != buffer.get_state()? { ic_logger_msg!(log_collector, "Buffer account already initialized"); return Err(InstructionError::AccountAlreadyInitialized); } - let authority_key = Some(*transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?); + let authority_key = Some(*instruction_context.get_key_of_instruction_account(1)?); buffer.set_state(&UpgradeableLoaderState::Buffer { authority_address: authority_key, @@ -515,17 +461,14 @@ fn process_loader_upgradeable_instruction( } UpgradeableLoaderInstruction::Write { offset, bytes } => { instruction_context.check_number_of_instruction_accounts(2)?; - let buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let buffer = instruction_context.try_borrow_instruction_account(0)?; if let UpgradeableLoaderState::Buffer { authority_address } = buffer.get_state()? 
{ if authority_address.is_none() { ic_logger_msg!(log_collector, "Buffer is immutable"); return Err(InstructionError::Immutable); // TODO better error code } - let authority_key = Some(*transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?); + let authority_key = Some(*instruction_context.get_key_of_instruction_account(1)?); if authority_address != authority_key { ic_logger_msg!(log_collector, "Incorrect buffer authority provided"); return Err(InstructionError::IncorrectAuthority); @@ -547,24 +490,18 @@ fn process_loader_upgradeable_instruction( } UpgradeableLoaderInstruction::DeployWithMaxDataLen { max_data_len } => { instruction_context.check_number_of_instruction_accounts(4)?; - let payer_key = *transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?; - let programdata_key = *transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?; - let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 4)?; + let payer_key = *instruction_context.get_key_of_instruction_account(0)?; + let programdata_key = *instruction_context.get_key_of_instruction_account(1)?; + let rent = + get_sysvar_with_account_check::rent(invoke_context, &instruction_context, 4)?; let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 5)?; + get_sysvar_with_account_check::clock(invoke_context, &instruction_context, 5)?; instruction_context.check_number_of_instruction_accounts(8)?; - let authority_key = Some(*transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(7)?, - )?); + let authority_key = Some(*instruction_context.get_key_of_instruction_account(7)?); // Verify Program account - let program = - 
instruction_context.try_borrow_instruction_account(transaction_context, 2)?; + let program = instruction_context.try_borrow_instruction_account(2)?; if UpgradeableLoaderState::Uninitialized != program.get_state()? { ic_logger_msg!(log_collector, "Program account already initialized"); return Err(InstructionError::AccountAlreadyInitialized); @@ -582,8 +519,7 @@ fn process_loader_upgradeable_instruction( // Verify Buffer account - let buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 3)?; + let buffer = instruction_context.try_borrow_instruction_account(3)?; if let UpgradeableLoaderState::Buffer { authority_address } = buffer.get_state()? { if authority_address != authority_key { ic_logger_msg!(log_collector, "Buffer and upgrade authority don't match"); @@ -631,10 +567,8 @@ fn process_loader_upgradeable_instruction( // Drain the Buffer account to payer before paying for programdata account { - let mut buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 3)?; - let mut payer = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut buffer = instruction_context.try_borrow_instruction_account(3)?; + let mut payer = instruction_context.try_borrow_instruction_account(0)?; payer.checked_add_lamports(buffer.get_lamports())?; buffer.set_lamports(0)?; } @@ -655,19 +589,20 @@ fn process_loader_upgradeable_instruction( let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let caller_program_id = - instruction_context.get_last_program_key(transaction_context)?; + let caller_program_id = instruction_context.get_program_key()?; + // The conversion from `PubkeyError` to `InstructionError` through + // num-traits is incorrect, but it's the existing behavior. 
let signers = [[new_program_id.as_ref(), &[bump_seed]]] .iter() .map(|seeds| Pubkey::create_program_address(seeds, caller_program_id)) - .collect::, solana_pubkey::PubkeyError>>()?; + .collect::, solana_pubkey::PubkeyError>>() + .map_err(|e| e as u64)?; invoke_context.native_invoke(instruction, signers.as_slice())?; // Load and verify the program bits let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 3)?; + let buffer = instruction_context.try_borrow_instruction_account(3)?; deploy_program!( invoke_context, &new_program_id, @@ -686,8 +621,7 @@ fn process_loader_upgradeable_instruction( // Update the ProgramData account and record the program bits { - let mut programdata = - instruction_context.try_borrow_instruction_account(transaction_context, 1)?; + let mut programdata = instruction_context.try_borrow_instruction_account(1)?; programdata.set_state(&UpgradeableLoaderState::ProgramData { slot: clock.slot, upgrade_authority_address: authority_key, @@ -699,8 +633,7 @@ fn process_loader_upgradeable_instruction( ..programdata_data_offset.saturating_add(buffer_data_len), ) .ok_or(InstructionError::AccountDataTooSmall)?; - let mut buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 3)?; + let mut buffer = instruction_context.try_borrow_instruction_account(3)?; let src_slice = buffer .get_data() .get(buffer_data_offset..) 
@@ -710,8 +643,7 @@ fn process_loader_upgradeable_instruction( } // Update the Program account - let mut program = - instruction_context.try_borrow_instruction_account(transaction_context, 2)?; + let mut program = instruction_context.try_borrow_instruction_account(2)?; program.set_state(&UpgradeableLoaderState::Program { programdata_address: programdata_key, })?; @@ -722,30 +654,17 @@ fn process_loader_upgradeable_instruction( } UpgradeableLoaderInstruction::Upgrade => { instruction_context.check_number_of_instruction_accounts(3)?; - let programdata_key = *transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?; - let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 4)?; + let programdata_key = *instruction_context.get_key_of_instruction_account(0)?; + let rent = + get_sysvar_with_account_check::rent(invoke_context, &instruction_context, 4)?; let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 5)?; + get_sysvar_with_account_check::clock(invoke_context, &instruction_context, 5)?; instruction_context.check_number_of_instruction_accounts(7)?; - let authority_key = Some(*transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(6)?, - )?); + let authority_key = Some(*instruction_context.get_key_of_instruction_account(6)?); // Verify Program account - let program = - instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - #[allow(deprecated)] - if !invoke_context - .get_feature_set() - .remove_accounts_executable_flag_checks - && !program.is_executable() - { - ic_logger_msg!(log_collector, "Program account not executable"); - return Err(InstructionError::AccountNotExecutable); - } + let program = instruction_context.try_borrow_instruction_account(1)?; if !program.is_writable() { ic_logger_msg!(log_collector, "Program account not writeable"); return 
Err(InstructionError::InvalidArgument); @@ -771,8 +690,7 @@ fn process_loader_upgradeable_instruction( // Verify Buffer account - let buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 2)?; + let buffer = instruction_context.try_borrow_instruction_account(2)?; if let UpgradeableLoaderState::Buffer { authority_address } = buffer.get_state()? { if authority_address != authority_key { ic_logger_msg!(log_collector, "Buffer and upgrade authority don't match"); @@ -799,8 +717,7 @@ fn process_loader_upgradeable_instruction( // Verify ProgramData account - let programdata = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let programdata = instruction_context.try_borrow_instruction_account(0)?; let programdata_data_offset = UpgradeableLoaderState::size_of_programdata_metadata(); let programdata_balance_required = 1.max(rent.minimum_balance(programdata.get_data().len())); @@ -848,8 +765,7 @@ fn process_loader_upgradeable_instruction( drop(programdata); // Load and verify the program bits - let buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 2)?; + let buffer = instruction_context.try_borrow_instruction_account(2)?; deploy_program!( invoke_context, &new_program_id, @@ -868,8 +784,7 @@ fn process_loader_upgradeable_instruction( // Update the ProgramData account, record the upgraded data, and zero // the rest - let mut programdata = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut programdata = instruction_context.try_borrow_instruction_account(0)?; { programdata.set_state(&UpgradeableLoaderState::ProgramData { slot: clock.slot, @@ -882,8 +797,7 @@ fn process_loader_upgradeable_instruction( ..programdata_data_offset.saturating_add(buffer_data_len), ) .ok_or(InstructionError::AccountDataTooSmall)?; - let buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 2)?; + let buffer = 
instruction_context.try_borrow_instruction_account(2)?; let src_slice = buffer .get_data() .get(buffer_data_offset..) @@ -897,10 +811,8 @@ fn process_loader_upgradeable_instruction( .fill(0); // Fund ProgramData to rent-exemption, spill the rest - let mut buffer = - instruction_context.try_borrow_instruction_account(transaction_context, 2)?; - let mut spill = - instruction_context.try_borrow_instruction_account(transaction_context, 3)?; + let mut buffer = instruction_context.try_borrow_instruction_account(2)?; + let mut spill = instruction_context.try_borrow_instruction_account(3)?; spill.checked_add_lamports( programdata .get_lamports() @@ -915,17 +827,9 @@ fn process_loader_upgradeable_instruction( } UpgradeableLoaderInstruction::SetAuthority => { instruction_context.check_number_of_instruction_accounts(2)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let present_authority_key = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?; - let new_authority = instruction_context - .get_index_of_instruction_account_in_transaction(2) - .and_then(|index_in_transaction| { - transaction_context.get_key_of_account_at_index(index_in_transaction) - }) - .ok(); + let mut account = instruction_context.try_borrow_instruction_account(0)?; + let present_authority_key = instruction_context.get_key_of_instruction_account(1)?; + let new_authority = instruction_context.get_key_of_instruction_account(2).ok(); match account.get_state()? 
{ UpgradeableLoaderState::Buffer { authority_address } => { @@ -987,14 +891,9 @@ fn process_loader_upgradeable_instruction( } instruction_context.check_number_of_instruction_accounts(3)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let present_authority_key = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?; - let new_authority_key = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(2)?, - )?; + let mut account = instruction_context.try_borrow_instruction_account(0)?; + let present_authority_key = instruction_context.get_key_of_instruction_account(1)?; + let new_authority_key = instruction_context.get_key_of_instruction_account(2)?; match account.get_state()? { UpgradeableLoaderState::Buffer { authority_address } => { @@ -1062,15 +961,14 @@ fn process_loader_upgradeable_instruction( ); return Err(InstructionError::InvalidArgument); } - let mut close_account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut close_account = instruction_context.try_borrow_instruction_account(0)?; let close_key = *close_account.get_key(); let close_account_state = close_account.get_state()?; close_account.set_data_length(UpgradeableLoaderState::size_of_uninitialized())?; match close_account_state { UpgradeableLoaderState::Uninitialized => { - let mut recipient_account = instruction_context - .try_borrow_instruction_account(transaction_context, 1)?; + let mut recipient_account = + instruction_context.try_borrow_instruction_account(1)?; recipient_account.checked_add_lamports(close_account.get_lamports())?; close_account.set_lamports(0)?; @@ -1079,12 +977,7 @@ fn process_loader_upgradeable_instruction( UpgradeableLoaderState::Buffer { authority_address } => { instruction_context.check_number_of_instruction_accounts(3)?; drop(close_account); - 
common_close_account( - &authority_address, - transaction_context, - instruction_context, - &log_collector, - )?; + common_close_account(&authority_address, &instruction_context, &log_collector)?; ic_logger_msg!(log_collector, "Closed Buffer {}", close_key); } @@ -1094,8 +987,7 @@ fn process_loader_upgradeable_instruction( } => { instruction_context.check_number_of_instruction_accounts(4)?; drop(close_account); - let program_account = instruction_context - .try_borrow_instruction_account(transaction_context, 3)?; + let program_account = instruction_context.try_borrow_instruction_account(3)?; let program_key = *program_account.get_key(); if !program_account.is_writable() { @@ -1127,8 +1019,7 @@ fn process_loader_upgradeable_instruction( drop(program_account); common_close_account( &authority_address, - transaction_context, - instruction_context, + &instruction_context, &log_collector, )?; let clock = invoke_context.get_sysvar_cache().get_clock()?; @@ -1185,23 +1076,17 @@ fn process_loader_upgradeable_instruction( } instruction_context.check_number_of_instruction_accounts(3)?; - let programdata_address = *transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?; - let program_address = *transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?; - let provided_authority_address = *transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(2)?, - )?; + let programdata_address = *instruction_context.get_key_of_instruction_account(0)?; + let program_address = *instruction_context.get_key_of_instruction_account(1)?; + let provided_authority_address = + *instruction_context.get_key_of_instruction_account(2)?; let clock_slot = invoke_context .get_sysvar_cache() .get_clock() .map(|clock| clock.slot)?; // Verify ProgramData account - let programdata = - 
instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let programdata = instruction_context.try_borrow_instruction_account(0)?; if !programdata.is_writable() { ic_logger_msg!(log_collector, "ProgramData account not writeable"); return Err(InstructionError::InvalidArgument); @@ -1243,8 +1128,7 @@ fn process_loader_upgradeable_instruction( } // Verify Program account - let mut program = - instruction_context.try_borrow_instruction_account(transaction_context, 1)?; + let mut program = instruction_context.try_borrow_instruction_account(1)?; if !program.is_writable() { ic_logger_msg!(log_collector, "Program account not writeable"); return Err(InstructionError::InvalidArgument); @@ -1270,8 +1154,7 @@ fn process_loader_upgradeable_instruction( program.set_owner(&loader_v4::id().to_bytes())?; drop(program); - let mut programdata = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut programdata = instruction_context.try_borrow_instruction_account(0)?; programdata.set_lamports(0)?; drop(programdata); @@ -1340,8 +1223,7 @@ fn process_loader_upgradeable_instruction( let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let mut programdata = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut programdata = instruction_context.try_borrow_instruction_account(0)?; programdata.set_data_from_slice(&[])?; drop(programdata); @@ -1360,7 +1242,7 @@ fn common_extend_program( let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let program_id = instruction_context.get_last_program_key(transaction_context)?; + let program_id = instruction_context.get_program_key()?; const PROGRAM_DATA_ACCOUNT_INDEX: IndexOfAccount = 0; const PROGRAM_ACCOUNT_INDEX: 
IndexOfAccount = 1; @@ -1373,8 +1255,8 @@ fn common_extend_program( return Err(InstructionError::InvalidInstructionData); } - let programdata_account = instruction_context - .try_borrow_instruction_account(transaction_context, PROGRAM_DATA_ACCOUNT_INDEX)?; + let programdata_account = + instruction_context.try_borrow_instruction_account(PROGRAM_DATA_ACCOUNT_INDEX)?; let programdata_key = *programdata_account.get_key(); if program_id != programdata_account.get_owner() { @@ -1386,8 +1268,8 @@ fn common_extend_program( return Err(InstructionError::InvalidArgument); } - let program_account = instruction_context - .try_borrow_instruction_account(transaction_context, PROGRAM_ACCOUNT_INDEX)?; + let program_account = + instruction_context.try_borrow_instruction_account(PROGRAM_ACCOUNT_INDEX)?; if !program_account.is_writable() { ic_logger_msg!(log_collector, "Program account is not writable"); return Err(InstructionError::InvalidArgument); @@ -1452,12 +1334,8 @@ fn common_extend_program( } if check_authority { - let authority_key = Some( - *transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(AUTHORITY_ACCOUNT_INDEX)?, - )?, - ); + let authority_key = + Some(*instruction_context.get_key_of_instruction_account(AUTHORITY_ACCOUNT_INDEX)?); if upgrade_authority_address != authority_key { ic_logger_msg!(log_collector, "Incorrect upgrade authority provided"); return Err(InstructionError::IncorrectAuthority); @@ -1487,10 +1365,8 @@ fn common_extend_program( // Dereference the program ID to prevent overlapping mutable/immutable borrow of invoke context let program_id = *program_id; if required_payment > 0 { - let payer_key = *transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(optional_payer_account_index)?, - )?; + let payer_key = + *instruction_context.get_key_of_instruction_account(optional_payer_account_index)?; invoke_context.native_invoke( 
system_instruction::transfer(&payer_key, &programdata_key, required_payment), @@ -1500,8 +1376,8 @@ fn common_extend_program( let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let mut programdata_account = instruction_context - .try_borrow_instruction_account(transaction_context, PROGRAM_DATA_ACCOUNT_INDEX)?; + let mut programdata_account = + instruction_context.try_borrow_instruction_account(PROGRAM_DATA_ACCOUNT_INDEX)?; programdata_account.set_data_length(new_len)?; let programdata_data_offset = UpgradeableLoaderState::size_of_programdata_metadata(); @@ -1519,8 +1395,8 @@ fn common_extend_program( ); drop(programdata_account); - let mut programdata_account = instruction_context - .try_borrow_instruction_account(transaction_context, PROGRAM_DATA_ACCOUNT_INDEX)?; + let mut programdata_account = + instruction_context.try_borrow_instruction_account(PROGRAM_DATA_ACCOUNT_INDEX)?; programdata_account.set_state(&UpgradeableLoaderState::ProgramData { slot: clock_slot, upgrade_authority_address, @@ -1537,7 +1413,6 @@ fn common_extend_program( fn common_close_account( authority_address: &Option, - transaction_context: &TransactionContext, instruction_context: &InstructionContext, log_collector: &Option>>, ) -> Result<(), InstructionError> { @@ -1545,11 +1420,7 @@ fn common_close_account( ic_logger_msg!(log_collector, "Account is immutable"); return Err(InstructionError::Immutable); } - if *authority_address - != Some(*transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(2)?, - )?) - { + if *authority_address != Some(*instruction_context.get_key_of_instruction_account(2)?) 
{ ic_logger_msg!(log_collector, "Incorrect authority provided"); return Err(InstructionError::IncorrectAuthority); } @@ -1558,10 +1429,8 @@ fn common_close_account( return Err(InstructionError::MissingRequiredSignature); } - let mut close_account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let mut recipient_account = - instruction_context.try_borrow_instruction_account(transaction_context, 1)?; + let mut close_account = instruction_context.try_borrow_instruction_account(0)?; + let mut recipient_account = instruction_context.try_borrow_instruction_account(1)?; recipient_account.checked_add_lamports(close_account.get_lamports())?; close_account.set_lamports(0)?; @@ -1584,14 +1453,9 @@ fn execute<'a, 'b: 'a>( let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let (program_id, is_loader_deprecated) = { - let program_account = - instruction_context.try_borrow_last_program_account(transaction_context)?; - ( - *program_account.get_key(), - *program_account.get_owner() == bpf_loader_deprecated::id(), - ) - }; + let program_id = *instruction_context.get_program_key()?; + let is_loader_deprecated = + instruction_context.get_program_owner()? 
== bpf_loader_deprecated::id(); #[cfg(any(target_os = "windows", not(target_arch = "x86_64")))] let use_jit = false; #[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] @@ -1599,18 +1463,22 @@ fn execute<'a, 'b: 'a>( let stricter_abi_and_runtime_constraints = invoke_context .get_feature_set() .stricter_abi_and_runtime_constraints; + let account_data_direct_mapping = invoke_context.get_feature_set().account_data_direct_mapping; let mask_out_rent_epoch_in_vm_serialization = invoke_context .get_feature_set() .mask_out_rent_epoch_in_vm_serialization; + let provide_instruction_data_offset_in_vm_r2 = invoke_context + .get_feature_set() + .provide_instruction_data_offset_in_vm_r2; let mut serialize_time = Measure::start("serialize"); - let (parameter_bytes, regions, accounts_metadata) = serialization::serialize_parameters( - invoke_context.transaction_context, - instruction_context, - stricter_abi_and_runtime_constraints, - invoke_context.account_data_direct_mapping, - mask_out_rent_epoch_in_vm_serialization, - )?; + let (parameter_bytes, regions, accounts_metadata, instruction_data_offset) = + serialization::serialize_parameters( + &instruction_context, + stricter_abi_and_runtime_constraints, + account_data_direct_mapping, + mask_out_rent_epoch_in_vm_serialization, + )?; serialize_time.stop(); // save the account addresses so in case we hit an AccessViolation error we @@ -1644,6 +1512,12 @@ fn execute<'a, 'b: 'a>( create_vm_time.stop(); vm.context_object_pointer.execute_time = Some(Measure::start("execute")); + vm.registers[1] = ebpf::MM_INPUT_START; + + // SIMD-0321: Provide offset to instruction data in VM register 2. 
+ if provide_instruction_data_offset_in_vm_r2 { + vm.registers[2] = instruction_data_offset as u64; + } let (compute_units_consumed, result) = vm.execute_program(executable, !use_jit); MEMORY_POOL.with_borrow_mut(|memory_pool| { memory_pool.put_stack(stack); @@ -1712,7 +1586,6 @@ fn execute<'a, 'b: 'a>( let instruction_context = transaction_context.get_current_instruction_context()?; let account = instruction_context.try_borrow_instruction_account( - transaction_context, instruction_account_index as IndexOfAccount, )?; if vm_addr.saturating_add(len) <= vm_addr_range.end { @@ -1768,14 +1641,14 @@ fn execute<'a, 'b: 'a>( invoke_context: &mut InvokeContext, parameter_bytes: &[u8], stricter_abi_and_runtime_constraints: bool, + account_data_direct_mapping: bool, ) -> Result<(), InstructionError> { serialization::deserialize_parameters( - invoke_context.transaction_context, - invoke_context + &invoke_context .transaction_context .get_current_instruction_context()?, stricter_abi_and_runtime_constraints, - invoke_context.account_data_direct_mapping, + account_data_direct_mapping, parameter_bytes, &invoke_context.get_syscall_context()?.accounts_metadata, ) @@ -1787,6 +1660,7 @@ fn execute<'a, 'b: 'a>( invoke_context, parameter_bytes.as_slice(), stricter_abi_and_runtime_constraints, + account_data_direct_mapping, ) .map_err(|error| Box::new(error) as Box) }); @@ -1888,19 +1762,17 @@ mod tests { solana_epoch_schedule::EpochSchedule, solana_instruction::{error::InstructionError, AccountMeta}, solana_program_runtime::{ - invoke_context::{mock_process_instruction, mock_process_instruction_with_feature_set}, - with_mock_invoke_context, + invoke_context::mock_process_instruction, with_mock_invoke_context, }, solana_pubkey::Pubkey, solana_rent::Rent, solana_sdk_ids::{system_program, sysvar}, - solana_svm_feature_set::SVMFeatureSet, std::{fs::File, io::Read, ops::Range, sync::atomic::AtomicU64}, }; fn process_instruction( loader_id: &Pubkey, - program_indices: &[IndexOfAccount], + 
program_index: Option, instruction_data: &[u8], transaction_accounts: Vec<(Pubkey, AccountSharedData)>, instruction_accounts: Vec, @@ -1908,7 +1780,7 @@ mod tests { ) -> Vec { mock_process_instruction( loader_id, - program_indices.to_vec(), + program_index, instruction_data, transaction_accounts, instruction_accounts, @@ -1950,7 +1822,7 @@ mod tests { // Case: No program account process_instruction( &loader_id, - &[], + None, &[], Vec::new(), Vec::new(), @@ -1960,7 +1832,7 @@ mod tests { // Case: Only a program account process_instruction( &loader_id, - &[0], + Some(0), &[], vec![(program_id, program_account.clone())], Vec::new(), @@ -1970,7 +1842,7 @@ mod tests { // Case: With program and parameter account process_instruction( &loader_id, - &[0], + Some(0), &[], vec![ (program_id, program_account.clone()), @@ -1983,7 +1855,7 @@ mod tests { // Case: With duplicate accounts process_instruction( &loader_id, - &[0], + Some(0), &[], vec![ (program_id, program_account.clone()), @@ -1996,7 +1868,7 @@ mod tests { // Case: limited budget mock_process_instruction( &loader_id, - vec![0], + Some(0), &[], vec![(program_id, program_account)], Vec::new(), @@ -2009,27 +1881,23 @@ mod tests { |_invoke_context| {}, ); - let mut feature_set = SVMFeatureSet::all_enabled(); - feature_set.remove_accounts_executable_flag_checks = false; - // Case: Account not a program - mock_process_instruction_with_feature_set( + mock_process_instruction( &loader_id, - vec![0], + Some(0), &[], vec![(program_id, parameter_account.clone())], Vec::new(), - Err(InstructionError::IncorrectProgramId), + Err(InstructionError::UnsupportedProgramId), Entrypoint::vm, |invoke_context| { test_utils::load_all_invoked_programs(invoke_context); }, |_invoke_context| {}, - &feature_set, ); process_instruction( &loader_id, - &[0], + Some(0), &[], vec![(program_id, parameter_account)], Vec::new(), @@ -2054,7 +1922,7 @@ mod tests { // Case: With program and parameter account process_instruction( &loader_id, - &[0], + 
Some(0), &[], vec![ (program_id, program_account.clone()), @@ -2067,7 +1935,7 @@ mod tests { // Case: With duplicate accounts process_instruction( &loader_id, - &[0], + Some(0), &[], vec![ (program_id, program_account), @@ -2095,7 +1963,7 @@ mod tests { // Case: With program and parameter account process_instruction( &loader_id, - &[0], + Some(0), &[], vec![ (program_id, program_account.clone()), @@ -2108,7 +1976,7 @@ mod tests { // Case: With duplicate accounts process_instruction( &loader_id, - &[0], + Some(0), &[], vec![ (program_id, program_account), @@ -2146,7 +2014,7 @@ mod tests { // Case: Success let accounts = process_instruction( &loader_id, - &[], + None, &instruction_data, vec![ (buffer_address, buffer_account), @@ -2166,7 +2034,7 @@ mod tests { // Case: Already initialized let accounts = process_instruction( &loader_id, - &[], + None, &instruction_data, vec![ (buffer_address, accounts.first().unwrap().clone()), @@ -2211,7 +2079,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, vec![(buffer_address, buffer_account.clone())], instruction_accounts.clone(), @@ -2231,7 +2099,7 @@ mod tests { .unwrap(); let accounts = process_instruction( &loader_id, - &[], + None, &instruction, vec![(buffer_address, buffer_account.clone())], instruction_accounts.clone(), @@ -2269,7 +2137,7 @@ mod tests { .unwrap(); let accounts = process_instruction( &loader_id, - &[], + None, &instruction, vec![(buffer_address, buffer_account.clone())], instruction_accounts.clone(), @@ -2305,7 +2173,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, vec![(buffer_address, buffer_account.clone())], instruction_accounts.clone(), @@ -2325,7 +2193,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, vec![(buffer_address, buffer_account.clone())], instruction_accounts.clone(), @@ -2345,7 +2213,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, 
vec![(buffer_address, buffer_account.clone())], vec![ @@ -2377,7 +2245,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, vec![ (buffer_address, buffer_account.clone()), @@ -2411,7 +2279,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, vec![(buffer_address, buffer_account.clone())], instruction_accounts, @@ -2558,7 +2426,7 @@ mod tests { bincode::serialize(&UpgradeableLoaderInstruction::Upgrade).unwrap(); mock_process_instruction( &bpf_loader_upgradeable::id(), - Vec::new(), + None, &instruction_data, transaction_accounts, instruction_accounts, @@ -2695,7 +2563,7 @@ mod tests { Err(InstructionError::AccountBorrowFailed), ); - // Case: Program account not executable + // Case: Program account not a program let (transaction_accounts, mut instruction_accounts) = get_accounts( &buffer_address, &upgrade_authority_address, @@ -2706,22 +2574,18 @@ mod tests { *instruction_accounts.get_mut(1).unwrap() = instruction_accounts.get(2).unwrap().clone(); let instruction_data = bincode::serialize(&UpgradeableLoaderInstruction::Upgrade).unwrap(); - let mut feature_set = SVMFeatureSet::all_enabled(); - feature_set.remove_accounts_executable_flag_checks = false; - - mock_process_instruction_with_feature_set( + mock_process_instruction( &bpf_loader_upgradeable::id(), - Vec::new(), + None, &instruction_data, transaction_accounts.clone(), instruction_accounts.clone(), - Err(InstructionError::AccountNotExecutable), + Err(InstructionError::InvalidAccountData), Entrypoint::vm, |invoke_context| { test_utils::load_all_invoked_programs(invoke_context); }, |_invoke_context| {}, - &feature_set, ); process_instruction( transaction_accounts.clone(), @@ -2984,7 +2848,7 @@ mod tests { // Case: Set to new authority let accounts = process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3013,7 +2877,7 @@ mod tests { // Case: Not upgradeable let accounts = 
process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3034,7 +2898,7 @@ mod tests { // Case: Authority did not sign process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3055,7 +2919,7 @@ mod tests { let invalid_upgrade_authority_address = Pubkey::new_unique(); process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3086,7 +2950,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3104,7 +2968,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3160,7 +3024,7 @@ mod tests { // Case: Set to new authority let accounts = process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3190,7 +3054,7 @@ mod tests { // Case: set to same authority process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3207,7 +3071,7 @@ mod tests { // Case: present authority not in instruction process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3218,13 +3082,13 @@ mod tests { ), ], vec![programdata_meta.clone(), new_upgrade_authority_meta.clone()], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); // Case: new authority not in instruction process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3235,13 +3099,13 @@ mod tests { ), ], vec![programdata_meta.clone(), upgrade_authority_meta.clone()], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); // 
Case: present authority did not sign process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3266,7 +3130,7 @@ mod tests { // Case: New authority did not sign process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3292,7 +3156,7 @@ mod tests { let invalid_upgrade_authority_address = Pubkey::new_unique(); process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3323,7 +3187,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3345,7 +3209,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3401,7 +3265,7 @@ mod tests { // Case: New authority required let accounts = process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![buffer_meta.clone(), authority_meta.clone()], @@ -3423,7 +3287,7 @@ mod tests { .unwrap(); let accounts = process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![ @@ -3444,7 +3308,7 @@ mod tests { // Case: Authority did not sign process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![ @@ -3462,7 +3326,7 @@ mod tests { // Case: wrong authority process_instruction( &loader_id, - &[], + None, &instruction, vec![ (buffer_address, buffer_account.clone()), @@ -3484,7 +3348,7 @@ mod tests { // Case: No authority process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![buffer_meta.clone(), authority_meta.clone()], @@ -3502,7 +3366,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![ @@ -3524,7 +3388,7 @@ mod tests { .unwrap(); 
process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![buffer_meta, authority_meta, new_authority_meta], @@ -3579,7 +3443,7 @@ mod tests { .unwrap(); let accounts = process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![ @@ -3600,7 +3464,7 @@ mod tests { // Case: set to same authority process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![ @@ -3614,27 +3478,27 @@ mod tests { // Case: Missing current authority process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![buffer_meta.clone(), new_authority_meta.clone()], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); // Case: Missing new authority process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![buffer_meta.clone(), authority_meta.clone()], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); // Case: wrong present authority process_instruction( &loader_id, - &[], + None, &instruction, vec![ (buffer_address, buffer_account.clone()), @@ -3656,7 +3520,7 @@ mod tests { // Case: present authority did not sign process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![ @@ -3674,7 +3538,7 @@ mod tests { // Case: new authority did not sign process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![ @@ -3700,7 +3564,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![ @@ -3722,7 +3586,7 @@ mod tests { .unwrap(); process_instruction( &loader_id, - &[], + None, &instruction, transaction_accounts.clone(), vec![buffer_meta, authority_meta, new_authority_meta], @@ -3805,7 +3669,7 @@ mod tests { // Case: close a buffer account let accounts = process_instruction( &loader_id, - &[], + None, 
&instruction, transaction_accounts, vec![ @@ -3827,7 +3691,7 @@ mod tests { // Case: close with wrong authority process_instruction( &loader_id, - &[], + None, &instruction, vec![ (buffer_address, buffer_account.clone()), @@ -3849,7 +3713,7 @@ mod tests { // Case: close an uninitialized account let accounts = process_instruction( &loader_id, - &[], + None, &instruction, vec![ (uninitialized_address, uninitialized_account.clone()), @@ -3879,7 +3743,7 @@ mod tests { // Case: close a program account let accounts = process_instruction( &loader_id, - &[], + None, &instruction, vec![ (programdata_address, programdata_account.clone()), @@ -3918,7 +3782,7 @@ mod tests { program_account = accounts.get(3).unwrap().clone(); process_instruction( &loader_id, - &[1], + Some(1), &[], vec![ (programdata_address, programdata_account.clone()), @@ -3931,7 +3795,7 @@ mod tests { // Case: Reopen should fail process_instruction( &loader_id, - &[], + None, &bincode::serialize(&UpgradeableLoaderInstruction::DeployWithMaxDataLen { max_data_len: 0, }) @@ -4045,7 +3909,7 @@ mod tests { program_account.set_executable(true); process_instruction( &loader_id, - &[], + None, &[], vec![(program_id, program_account)], Vec::new(), @@ -4107,8 +3971,7 @@ mod tests { account_size: 0, deployment_slot: 0, effective_slot: 0, - tx_usage_counter: AtomicU64::new(100), - ix_usage_counter: AtomicU64::new(100), + tx_usage_counter: Arc::new(AtomicU64::new(100)), latest_access_slot: AtomicU64::new(0), }; invoke_context @@ -4130,10 +3993,6 @@ mod tests { updated_program.tx_usage_counter.load(Ordering::Relaxed), 100 ); - assert_eq!( - updated_program.ix_usage_counter.load(Ordering::Relaxed), - 100 - ); } #[test] @@ -4151,8 +4010,7 @@ mod tests { account_size: 0, deployment_slot: 0, effective_slot: 0, - tx_usage_counter: AtomicU64::new(100), - ix_usage_counter: AtomicU64::new(100), + tx_usage_counter: Arc::new(AtomicU64::new(100)), latest_access_slot: AtomicU64::new(0), }; invoke_context @@ -4172,6 +4030,5 @@ mod 
tests { assert_eq!(program2.deployment_slot, 2); assert_eq!(program2.tx_usage_counter.load(Ordering::Relaxed), 0); - assert_eq!(program2.ix_usage_counter.load(Ordering::Relaxed), 0); } } diff --git a/programs/loader-v4/Cargo.toml b/programs/loader-v4/Cargo.toml index 5aaaa6b012d567..e525742d626cdc 100644 --- a/programs/loader-v4/Cargo.toml +++ b/programs/loader-v4/Cargo.toml @@ -18,9 +18,9 @@ name = "solana_loader_v4_program" [features] agave-unstable-api = [] shuttle-test = [ - "solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test", "solana-sbpf/shuttle-test", + "solana-svm-type-overrides/shuttle-test", ] svm-internal = [] @@ -33,15 +33,15 @@ solana-bpf-loader-program = { workspace = true, features = ["svm-internal"] } solana-instruction = { workspace = true } solana-loader-v3-interface = { workspace = true } solana-loader-v4-interface = { workspace = true, features = ["serde"] } -solana-log-collector = { workspace = true } -solana-measure = { workspace = true } solana-packet = { workspace = true } solana-program-runtime = { workspace = true } solana-pubkey = { workspace = true } -solana-sbpf = { workspace = true } +solana-sbpf = { workspace = true, features = ["jit"] } solana-sdk-ids = { workspace = true } +solana-svm-log-collector = { workspace = true } +solana-svm-measure = { workspace = true } +solana-svm-type-overrides = { workspace = true } solana-transaction-context = { workspace = true } -solana-type-overrides = { workspace = true } [dev-dependencies] bincode = { workspace = true } diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 443513c868054a..c2591e8b1cf6b7 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -10,8 +10,6 @@ use { state::{LoaderV4State, LoaderV4Status}, DEPLOYMENT_COOLDOWN_IN_SLOTS, }, - solana_log_collector::{ic_logger_msg, LogCollector}, - solana_measure::measure::Measure, solana_program_runtime::{ invoke_context::InvokeContext, 
loaded_programs::{ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType}, @@ -19,8 +17,10 @@ use { solana_pubkey::Pubkey, solana_sbpf::{declare_builtin_function, memory_region::MemoryMapping}, solana_sdk_ids::{bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, loader_v4}, - solana_transaction_context::{BorrowedAccount, InstructionContext}, - solana_type_overrides::sync::{atomic::Ordering, Arc}, + solana_svm_log_collector::{ic_logger_msg, LogCollector}, + solana_svm_measure::measure::Measure, + solana_svm_type_overrides::sync::Arc, + solana_transaction_context::{BorrowedInstructionAccount, InstructionContext}, std::{cell::RefCell, rc::Rc}, }; @@ -58,7 +58,7 @@ fn get_state_mut(data: &mut [u8]) -> Result<&mut LoaderV4State, InstructionError fn check_program_account( log_collector: &Option>>, instruction_context: &InstructionContext, - program: &BorrowedAccount, + program: &BorrowedInstructionAccount, authority_address: &Pubkey, ) -> Result { if !loader_v4::check_id(program.get_owner()) { @@ -93,13 +93,11 @@ fn process_instruction_write( let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let authority_address = instruction_context - .get_index_of_instruction_account_in_transaction(1) - .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; + let mut program = instruction_context.try_borrow_instruction_account(0)?; + let authority_address = instruction_context.get_key_of_instruction_account(1)?; let state = check_program_account( &log_collector, - instruction_context, + &instruction_context, &program, authority_address, )?; @@ -128,15 +126,12 @@ fn process_instruction_copy( let log_collector = invoke_context.get_log_collector(); let transaction_context = 
&invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let authority_address = instruction_context - .get_index_of_instruction_account_in_transaction(1) - .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; - let source_program = - instruction_context.try_borrow_instruction_account(transaction_context, 2)?; + let mut program = instruction_context.try_borrow_instruction_account(0)?; + let authority_address = instruction_context.get_key_of_instruction_account(1)?; + let source_program = instruction_context.try_borrow_instruction_account(2)?; let state = check_program_account( &log_collector, - instruction_context, + &instruction_context, &program, authority_address, )?; @@ -185,10 +180,8 @@ fn process_instruction_set_program_length( let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let authority_address = instruction_context - .get_index_of_instruction_account_in_transaction(1) - .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; + let mut program = instruction_context.try_borrow_instruction_account(0)?; + let authority_address = instruction_context.get_key_of_instruction_account(1)?; let is_initialization = program.get_data().len() < LoaderV4State::program_data_offset(); if is_initialization { if !loader_v4::check_id(program.get_owner()) { @@ -206,7 +199,7 @@ fn process_instruction_set_program_length( } else { let state = check_program_account( &log_collector, - instruction_context, + &instruction_context, &program, authority_address, )?; @@ -232,9 +225,7 @@ fn process_instruction_set_program_length( return 
Err(InstructionError::InsufficientFunds); } std::cmp::Ordering::Greater => { - let recipient = instruction_context - .try_borrow_instruction_account(transaction_context, 2) - .ok(); + let recipient = instruction_context.try_borrow_instruction_account(2).ok(); if let Some(mut recipient) = recipient { if !instruction_context.is_instruction_account_writable(2)? { ic_logger_msg!(log_collector, "Recipient is not writeable"); @@ -274,13 +265,11 @@ fn process_instruction_deploy(invoke_context: &mut InvokeContext) -> Result<(), let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let authority_address = instruction_context - .get_index_of_instruction_account_in_transaction(1) - .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; + let mut program = instruction_context.try_borrow_instruction_account(0)?; + let authority_address = instruction_context.get_key_of_instruction_account(1)?; let state = check_program_account( &log_collector, - instruction_context, + &instruction_context, &program, authority_address, )?; @@ -324,14 +313,12 @@ fn process_instruction_retract(invoke_context: &mut InvokeContext) -> Result<(), let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut program = instruction_context.try_borrow_instruction_account(0)?; - let authority_address = instruction_context - .get_index_of_instruction_account_in_transaction(1) - .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; + let authority_address = 
instruction_context.get_key_of_instruction_account(1)?; let state = check_program_account( &log_collector, - instruction_context, + &instruction_context, &program, authority_address, )?; @@ -368,16 +355,12 @@ fn process_instruction_transfer_authority( let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let authority_address = instruction_context - .get_index_of_instruction_account_in_transaction(1) - .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; - let new_authority_address = instruction_context - .get_index_of_instruction_account_in_transaction(2) - .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; + let mut program = instruction_context.try_borrow_instruction_account(0)?; + let authority_address = instruction_context.get_key_of_instruction_account(1)?; + let new_authority_address = instruction_context.get_key_of_instruction_account(2)?; let state = check_program_account( &log_collector, - instruction_context, + &instruction_context, &program, authority_address, )?; @@ -400,13 +383,11 @@ fn process_instruction_finalize( let log_collector = invoke_context.get_log_collector(); let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let authority_address = instruction_context - .get_index_of_instruction_account_in_transaction(1) - .and_then(|index| transaction_context.get_key_of_account_at_index(index))?; + let program = instruction_context.try_borrow_instruction_account(0)?; + let authority_address = instruction_context.get_key_of_instruction_account(1)?; let state = 
check_program_account( &log_collector, - instruction_context, + &instruction_context, &program, authority_address, )?; @@ -415,8 +396,7 @@ fn process_instruction_finalize( return Err(InstructionError::InvalidArgument); } drop(program); - let next_version = - instruction_context.try_borrow_instruction_account(transaction_context, 2)?; + let next_version = instruction_context.try_borrow_instruction_account(2)?; if !loader_v4::check_id(next_version.get_owner()) { ic_logger_msg!(log_collector, "Next version is not owned by loader"); return Err(InstructionError::InvalidAccountOwner); @@ -432,7 +412,7 @@ fn process_instruction_finalize( } let address_of_next_version = *next_version.get_key(); drop(next_version); - let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut program = instruction_context.try_borrow_instruction_account(0)?; let state = get_state_mut(program.get_data_mut()?)?; state.authority_address_or_next_version = address_of_next_version; state.status = LoaderV4Status::Finalized; @@ -461,7 +441,7 @@ fn process_instruction_inner( let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); - let program_id = instruction_context.get_last_program_key(transaction_context)?; + let program_id = instruction_context.get_program_key()?; if loader_v4::check_id(program_id) { invoke_context.consume_checked(DEFAULT_COMPUTE_UNITS)?; match limited_deserialize(instruction_data, solana_packet::PACKET_DATA_SIZE as u64)? 
{ @@ -487,21 +467,16 @@ fn process_instruction_inner( } .map_err(|err| Box::new(err) as Box) } else { - let program = instruction_context.try_borrow_last_program_account(transaction_context)?; let mut get_or_create_executor_time = Measure::start("get_or_create_executor_time"); let loaded_program = invoke_context .program_cache_for_tx_batch - .find(program.get_key()) + .find(program_id) .ok_or_else(|| { ic_logger_msg!(log_collector, "Program is not cached"); InstructionError::UnsupportedProgramId })?; get_or_create_executor_time.stop(); invoke_context.timings.get_or_create_executor_us += get_or_create_executor_time.as_us(); - drop(program); - loaded_program - .ix_usage_counter - .fetch_add(1, Ordering::Relaxed); match &loaded_program.program { ProgramCacheEntryType::FailedVerification(_) | ProgramCacheEntryType::Closed @@ -536,7 +511,7 @@ mod tests { }; fn process_instruction( - program_indices: Vec, + program_index: Option, instruction_data: &[u8], transaction_accounts: Vec<(Pubkey, AccountSharedData)>, instruction_accounts: &[(IndexOfAccount, bool, bool)], @@ -552,9 +527,10 @@ mod tests { }, ) .collect::>(); + mock_process_instruction( &loader_v4::id(), - program_indices, + program_index, instruction_data, transaction_accounts, instruction_accounts, @@ -638,25 +614,25 @@ mod tests { // Error: Missing program account process_instruction( - vec![], + None, &instruction, transaction_accounts.clone(), &[], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); // Error: Missing authority account process_instruction( - vec![], + None, &instruction, transaction_accounts.clone(), &[(0, false, true)], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); // Error: Program not owned by loader process_instruction( - vec![], + None, &instruction, transaction_accounts.clone(), &[(1, false, true), (1, true, false), (2, true, true)], @@ -665,7 +641,7 @@ mod tests { // Error: Program is not writeable 
process_instruction( - vec![], + None, &instruction, transaction_accounts.clone(), &[(0, false, false), (1, true, false), (2, true, true)], @@ -674,7 +650,7 @@ mod tests { // Error: Authority did not sign process_instruction( - vec![], + None, &instruction, transaction_accounts.clone(), &[(0, false, true), (1, false, false), (2, true, true)], @@ -683,7 +659,7 @@ mod tests { // Error: Program is finalized process_instruction( - vec![], + None, &instruction, transaction_accounts.clone(), &[(2, false, true), (1, true, false), (0, true, true)], @@ -692,7 +668,7 @@ mod tests { // Error: Incorrect authority provided process_instruction( - vec![], + None, &instruction, transaction_accounts, &[(0, false, true), (2, true, false), (2, true, true)], @@ -736,7 +712,7 @@ mod tests { // Overwrite existing data process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Write { offset: 2, bytes: vec![8, 8, 8, 8], @@ -749,7 +725,7 @@ mod tests { // Empty write process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Write { offset: 2, bytes: Vec::new(), @@ -762,7 +738,7 @@ mod tests { // Error: Program is not retracted process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Write { offset: 8, bytes: vec![8, 8, 8, 8], @@ -775,7 +751,7 @@ mod tests { // Error: Write out of bounds process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Write { offset: transaction_accounts[0] .1 @@ -833,7 +809,7 @@ mod tests { // Overwrite existing data process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Copy { destination_offset: 1, source_offset: 2, @@ -847,7 +823,7 @@ mod tests { // Empty copy process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Copy { destination_offset: 1, source_offset: 2, @@ -861,7 +837,7 @@ mod tests { // Error: Program is not retracted process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Copy { 
destination_offset: 1, source_offset: 2, @@ -875,7 +851,7 @@ mod tests { // Error: Destination and source collide process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Copy { destination_offset: 1, source_offset: 2, @@ -889,7 +865,7 @@ mod tests { // Error: Read out of bounds process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Copy { destination_offset: 1, source_offset: transaction_accounts[2] @@ -908,7 +884,7 @@ mod tests { // Error: Write out of bounds process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Copy { destination_offset: transaction_accounts[0] .1 @@ -987,7 +963,7 @@ mod tests { // No change let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: transaction_accounts[0] .1 @@ -1012,7 +988,7 @@ mod tests { .1 .set_lamports(smaller_program_lamports); let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: transaction_accounts[0] .1 @@ -1036,7 +1012,7 @@ mod tests { .1 .set_lamports(larger_program_lamports); let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: transaction_accounts[4] .1 @@ -1057,7 +1033,7 @@ mod tests { // Decrease program account size, with a recipient let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: transaction_accounts[0] .1 @@ -1087,7 +1063,7 @@ mod tests { // Decrease program account size, without a recipient let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: transaction_accounts[0] .1 @@ -1109,7 +1085,7 @@ mod tests { // Close program account let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: 0 }).unwrap(), 
transaction_accounts.clone(), &[(0, false, true), (1, true, false), (2, false, true)], @@ -1128,7 +1104,7 @@ mod tests { // Close uninitialized program account process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: 0 }).unwrap(), transaction_accounts.clone(), &[(3, false, true), (1, true, false), (2, true, true)], @@ -1137,7 +1113,7 @@ mod tests { // Error: Program not owned by loader process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: 8 }).unwrap(), transaction_accounts.clone(), &[(1, false, true), (1, true, false), (2, true, true)], @@ -1146,7 +1122,7 @@ mod tests { // Error: Program is not writeable process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: 8 }).unwrap(), transaction_accounts.clone(), &[(3, false, false), (1, true, false), (2, true, true)], @@ -1155,7 +1131,7 @@ mod tests { // Error: Close program account without a recipient process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: 0 }).unwrap(), transaction_accounts.clone(), &[(0, false, true), (1, true, false)], @@ -1164,7 +1140,7 @@ mod tests { // Error: Authority did not sign process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: 8 }).unwrap(), transaction_accounts.clone(), &[(3, true, true), (1, false, false), (2, true, true)], @@ -1173,7 +1149,7 @@ mod tests { // Error: Program is not retracted process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: 8 }).unwrap(), transaction_accounts.clone(), &[(5, false, true), (1, true, false), (2, false, true)], @@ -1182,7 +1158,7 @@ mod tests { // Error: Recipient is not writeable process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: 0 }).unwrap(), transaction_accounts.clone(), 
&[(0, false, true), (1, true, false), (2, false, false)], @@ -1191,7 +1167,7 @@ mod tests { // Error: Insufficient funds process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::SetProgramLength { new_size: transaction_accounts[4] .1 @@ -1256,7 +1232,7 @@ mod tests { // Deploy from its own data let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Deploy).unwrap(), transaction_accounts.clone(), &[(0, false, true), (1, true, false)], @@ -1272,7 +1248,7 @@ mod tests { // Error: Program was deployed recently, cooldown still in effect process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Deploy).unwrap(), transaction_accounts.clone(), &[(0, false, true), (1, true, false)], @@ -1282,7 +1258,7 @@ mod tests { // Error: Program is uninitialized process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Deploy).unwrap(), transaction_accounts.clone(), &[(3, false, true), (1, true, false)], @@ -1291,7 +1267,7 @@ mod tests { // Error: Program fails verification process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Deploy).unwrap(), transaction_accounts.clone(), &[(4, false, true), (1, true, false)], @@ -1300,7 +1276,7 @@ mod tests { // Error: Program is deployed already process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Deploy).unwrap(), transaction_accounts.clone(), &[(0, false, true), (1, true, false)], @@ -1347,7 +1323,7 @@ mod tests { // Retract program let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Retract).unwrap(), transaction_accounts.clone(), &[(0, false, true), (1, true, false)], @@ -1361,7 +1337,7 @@ mod tests { // Error: Program is uninitialized process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Retract).unwrap(), transaction_accounts.clone(), &[(2, false, true), (1, true, false)], @@ -1370,7 +1346,7 @@ mod tests { // 
Error: Program is not deployed process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Retract).unwrap(), transaction_accounts.clone(), &[(3, false, true), (1, true, false)], @@ -1380,7 +1356,7 @@ mod tests { // Error: Program was deployed recently, cooldown still in effect transaction_accounts[4].1 = clock(0); process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Retract).unwrap(), transaction_accounts.clone(), &[(0, false, true), (1, true, false)], @@ -1434,7 +1410,7 @@ mod tests { // Transfer authority let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), transaction_accounts.clone(), &[(0, false, true), (3, true, false), (4, true, false)], @@ -1448,16 +1424,16 @@ mod tests { // Error: No new authority provided process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), transaction_accounts.clone(), &[(0, false, true), (3, true, false)], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); // Error: Program is uninitialized process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), transaction_accounts.clone(), &[(2, false, true), (3, true, false), (4, true, false)], @@ -1466,7 +1442,7 @@ mod tests { // Error: New authority did not sign process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), transaction_accounts.clone(), &[(0, false, true), (3, true, false), (4, false, false)], @@ -1475,7 +1451,7 @@ mod tests { // Error: Authority did not change process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::TransferAuthority).unwrap(), transaction_accounts, &[(0, false, true), (3, true, false), (3, true, false)], @@ -1541,7 +1517,7 @@ mod tests { // Finalize program with a next version let accounts = process_instruction( - vec![], + 
None, &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), &[(0, false, true), (5, true, false), (1, false, false)], @@ -1555,7 +1531,7 @@ mod tests { // Finalize program with itself as next version let accounts = process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), &[(0, false, true), (5, true, false), (0, false, false)], @@ -1569,7 +1545,7 @@ mod tests { // Error: Program must be deployed to be finalized process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), &[(1, false, true), (5, true, false)], @@ -1578,7 +1554,7 @@ mod tests { // Error: Program is uninitialized process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), &[(4, false, true), (5, true, false)], @@ -1587,7 +1563,7 @@ mod tests { // Error: Next version not owned by loader process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), &[(0, false, true), (5, true, false), (5, false, false)], @@ -1596,7 +1572,7 @@ mod tests { // Error: Program is uninitialized process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), &[(0, false, true), (5, true, false), (4, false, false)], @@ -1605,7 +1581,7 @@ mod tests { // Error: Next version is finalized process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), &[(0, false, true), (5, true, false), (2, false, false)], @@ -1614,7 +1590,7 @@ mod tests { // Error: Incorrect authority of next version process_instruction( - vec![], + None, &bincode::serialize(&LoaderV4Instruction::Finalize).unwrap(), transaction_accounts.clone(), &[(0, false, true), (5, true, false), (3, false, false)], @@ -1665,7 
+1641,7 @@ mod tests { // Execute program process_instruction( - vec![0], + Some(0), &[0, 1, 2, 3], transaction_accounts.clone(), &[(1, false, true)], @@ -1674,7 +1650,7 @@ mod tests { // Error: Program not owned by loader process_instruction( - vec![1], + Some(1), &[0, 1, 2, 3], transaction_accounts.clone(), &[(1, false, true)], @@ -1683,7 +1659,7 @@ mod tests { // Error: Program is uninitialized process_instruction( - vec![2], + Some(2), &[0, 1, 2, 3], transaction_accounts.clone(), &[(1, false, true)], @@ -1693,7 +1669,7 @@ mod tests { // Error: Program is not deployed // This is only checked in integration with load_program_accounts() in the SVM process_instruction( - vec![3], + Some(3), &[0, 1, 2, 3], transaction_accounts.clone(), &[(1, false, true)], @@ -1702,7 +1678,7 @@ mod tests { // Error: Program fails verification process_instruction( - vec![4], + Some(4), &[0, 1, 2, 3], transaction_accounts, &[(1, false, true)], diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a54847e58b2b18..11e454b9aa9b1d 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -43,7 +43,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "cipher", "cpufeatures", ] @@ -65,7 +65,7 @@ dependencies = [ [[package]] name = "agave-banking-stage-ingress-types" -version = "3.0.0" +version = "3.1.0" dependencies = [ "crossbeam-channel", "solana-perf", @@ -73,7 +73,7 @@ dependencies = [ [[package]] name = "agave-feature-set" -version = "3.0.0" +version = "3.1.0" dependencies = [ "ahash 0.8.11", "solana-epoch-schedule", @@ -85,7 +85,7 @@ dependencies = [ [[package]] name = "agave-geyser-plugin-interface" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "solana-clock", @@ -93,12 +93,12 @@ dependencies = [ "solana-signature", "solana-transaction", "solana-transaction-status", - "thiserror 
2.0.12", + "thiserror 2.0.16", ] [[package]] name = "agave-io-uring" -version = "3.0.0" +version = "3.1.0" dependencies = [ "io-uring", "libc", @@ -107,14 +107,18 @@ dependencies = [ "smallvec", ] +[[package]] +name = "agave-low-pass-filter" +version = "3.1.0" + [[package]] name = "agave-precompiles" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bincode", "digest 0.10.7", - "ed25519-dalek", + "ed25519-dalek 1.0.1", "libsecp256k1 0.6.0", "openssl", "sha3", @@ -129,7 +133,7 @@ dependencies = [ [[package]] name = "agave-reserved-account-keys" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "solana-pubkey", @@ -138,7 +142,7 @@ dependencies = [ [[package]] name = "agave-syscalls" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "libsecp256k1 0.6.0", @@ -150,13 +154,11 @@ dependencies = [ "solana-bn254", "solana-clock", "solana-cpi", - "solana-curve25519 3.0.0", + "solana-curve25519 3.1.0", "solana-hash", "solana-instruction", "solana-keccak-hasher", - "solana-loader-v3-interface 5.0.0", - "solana-log-collector", - "solana-measure", + "solana-loader-v3-interface", "solana-poseidon", "solana-program-entrypoint", "solana-program-runtime", @@ -166,19 +168,22 @@ dependencies = [ "solana-secp256k1-recover", "solana-sha256-hasher", "solana-stable-layout", + "solana-stake-interface", "solana-svm-callback", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-timings", + "solana-svm-type-overrides", "solana-sysvar", "solana-sysvar-id", - "solana-timings", "solana-transaction-context", - "solana-type-overrides", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "agave-transaction-view" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-hash", "solana-message", @@ -192,12 +197,12 @@ dependencies = [ [[package]] name = "agave-validator" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-geyser-plugin-interface", "chrono", "clap", 
- "console 0.16.0", + "console 0.16.1", "core_affinity", "crossbeam-channel", "fd-lock", @@ -211,6 +216,7 @@ dependencies = [ "libloading", "log", "num_cpus", + "qualifier_attr", "rand 0.8.5", "rayon", "serde", @@ -265,29 +271,72 @@ dependencies = [ "solana-version", "solana-vote-program", "symlink", - "thiserror 2.0.12", + "thiserror 2.0.16", "tikv-jemallocator", "tokio", ] [[package]] name = "agave-verified-packet-receiver" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-perf", "solana-streamer", ] +[[package]] +name = "agave-votor" +version = "3.1.0" +dependencies = [ + "anyhow", + "bincode", + "bitvec", + "bs58", + "crossbeam-channel", + "dashmap", + "itertools 0.12.1", + "log", + "parking_lot 0.12.2", + "qualifier_attr", + "rayon", + "serde", + "serde_bytes", + "serde_derive", + "solana-accounts-db", + "solana-bloom", + "solana-bls-signatures", + "solana-clock", + "solana-entry", + "solana-epoch-schedule", + "solana-gossip", + "solana-hash", + "solana-keypair", + "solana-ledger", + "solana-logger", + "solana-measure", + "solana-metrics", + "solana-pubkey", + "solana-rpc", + "solana-runtime", + "solana-signature", + "solana-signer", + "solana-signer-store", + "solana-time-utils", + "solana-transaction", + "solana-votor-messages", + "thiserror 2.0.16", +] + [[package]] name = "agave-xdp" -version = "3.0.0" +version = "3.1.0" dependencies = [ "aya", "caps", "crossbeam-channel", "libc", "log", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] @@ -307,7 +356,7 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "getrandom 0.2.10", "once_cell", "version_check", @@ -353,12 +402,6 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" -[[package]] -name = "android-tzdata" 
-version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.4" @@ -377,11 +420,61 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "anstream" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" + +[[package]] +name = "anstyle-parse" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" +dependencies = [ + "windows-sys 0.60.2", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" +dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys 0.60.2", +] + [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "aquamarine" @@ -589,17 +682,6 @@ version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" -[[package]] -name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - [[package]] name = "async-compression" version = "0.4.1" @@ -620,7 +702,7 @@ version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" dependencies = [ - "event-listener 5.3.1", + "event-listener", "event-listener-strategy", "pin-project-lite", ] @@ -649,9 +731,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", @@ -702,7 +784,7 @@ dependencies = [ "matchit", "memchr", "mime", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project-lite", "rustversion", "serde", @@ -737,7 +819,7 @@ checksum = "d18bc4e506fbb85ab7392ed993a7db4d1a452c71b75a246af4a80ab8c9d2dd50" dependencies = [ "assert_matches", "aya-obj", - "bitflags 2.9.1", + "bitflags 2.9.4", "bytes", "libc", "log", @@ -782,13 +864,19 @@ checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if 1.0.3", "libc", "miniz_oxide", "object 0.31.1", "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.12.3" 
@@ -813,6 +901,12 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64ct" +version = "1.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" + [[package]] name = "bincode" version = "1.3.3" @@ -828,7 +922,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "cexpr", "clang-sys", "itertools 0.12.1", @@ -850,9 +944,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bitflags" -version = "2.9.1" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" dependencies = [ "serde", ] @@ -866,6 +960,19 @@ dependencies = [ "typenum", ] +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "serde", + "tap", + "wyz", +] + [[package]] name = "blake3" version = "1.8.2" @@ -875,7 +982,7 @@ dependencies = [ "arrayref", "arrayvec", "cc", - "cfg-if 1.0.0", + "cfg-if 1.0.3", "constant_time_eq", "digest 0.10.7", ] @@ -899,36 +1006,41 @@ dependencies = [ ] [[package]] -name = "borsh" -version = "0.10.3" +name = "blst" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" +checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080" dependencies = [ 
- "borsh-derive 0.10.3", - "hashbrown 0.13.2", + "cc", + "glob", + "threadpool", + "zeroize", ] [[package]] -name = "borsh" -version = "1.5.7" +name = "blstrs" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" dependencies = [ - "borsh-derive 1.5.7", - "cfg_aliases", + "blst", + "byte-slice-cast", + "ff", + "group", + "pairing", + "rand_core 0.6.4", + "serde", + "subtle", ] [[package]] -name = "borsh-derive" -version = "0.10.3" +name = "borsh" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate 0.1.5", - "proc-macro2", - "syn 1.0.109", + "borsh-derive", + "cfg_aliases", ] [[package]] @@ -944,28 +1056,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "borsh-derive-internal" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "borsh-schema-derive-internal" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "brotli" version = "3.3.4" @@ -1022,20 +1112,26 @@ dependencies = [ "serde", ] +[[package]] +name = "byte-slice-cast" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" + 
[[package]] name = "bytemuck" -version = "1.23.1" +version = "1.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" +checksum = "4f154e572231cb6ba2bd1176980827e3d5dc04cc183a75dea38109fbdd672d29" dependencies = [ "proc-macro2", "quote", @@ -1128,9 +1224,9 @@ checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" [[package]] name = "cfg_aliases" @@ -1151,11 +1247,10 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", @@ -1208,6 +1303,12 @@ dependencies = [ "vec_map", ] +[[package]] +name = "colorchoice" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" + [[package]] name = "combine" version = "3.8.1" @@ -1261,36 +1362,22 @@ dependencies = [ [[package]] name = "console" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d" +checksum = "b430743a6eb14e9764d4260d4c0d8123087d504eeb9c48f2b2a5e810dd369df4" dependencies = [ "encode_unicode", "libc", "once_cell", "unicode-width 0.2.0", - "windows-sys 0.60.2", -] - -[[package]] -name = "console_error_panic_hook" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" -dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen", + "windows-sys 0.61.0", ] [[package]] -name = "console_log" -version = "0.2.2" +name = "const-oid" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89f72f65e8501878b8a004d5a1afb780987e2ce2b4532c562e367a72c57499f" -dependencies = [ - "log", - "web-sys", -] +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "constant_time_eq" @@ -1393,7 +1480,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "crossbeam-epoch", "crossbeam-utils", ] @@ -1404,7 +1491,7 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "crossbeam-utils", "lazy_static", "memoffset 0.6.4", @@ -1417,7 +1504,7 @@ version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", ] [[package]] @@ -1426,6 +1513,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.5.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1475,7 +1574,7 @@ version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", @@ -1500,9 +1599,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ "darling_core", "darling_macro", @@ -1510,23 +1609,23 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", + "strsim 0.11.1", "syn 2.0.87", ] [[package]] name = "darling_macro" -version = "0.20.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core", "quote", @@ -1539,7 +1638,7 @@ version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", 
"hashbrown 0.14.3", "lock_api", "once_cell", @@ -1553,6 +1652,16 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "zeroize", +] + [[package]] name = "der-parser" version = "8.2.0" @@ -1595,9 +1704,9 @@ dependencies = [ [[package]] name = "derive-where" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" +checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", @@ -1673,6 +1782,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", + "const-oid", "crypto-common", "subtle", ] @@ -1692,7 +1802,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "dirs-sys-next", ] @@ -1759,13 +1869,37 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abe71d579d1812060163dff96056261deb5bf6729b100fa2e36a68b9649ba3d3" +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest 0.10.7", + "elliptic-curve", + "rfc6979", + "signature 2.2.0", + "spki", +] + [[package]] name = "ed25519" version = "1.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bf038a7b6fd7ef78ad3348b63f3a17550877b0e28f8d68bcc94894d1412158bc" dependencies = [ - "signature", + "signature 1.1.0", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature 2.2.0", ] [[package]] @@ -1775,21 +1909,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ "curve25519-dalek 3.2.0", - "ed25519", + "ed25519 1.0.1", "rand 0.7.3", "serde", "sha2 0.9.9", "zeroize", ] +[[package]] +name = "ed25519-dalek" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" +dependencies = [ + "curve25519-dalek 4.1.3", + "ed25519 2.2.3", + "rand_core 0.6.4", + "serde", + "sha2 0.10.9", + "subtle", + "zeroize", +] + [[package]] name = "ed25519-dalek-bip32" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d2be62a4061b872c8c0873ee4fc6f101ce7b889d039f019c5fa2af471a59908" +checksum = "6b49a684b133c4980d7ee783936af771516011c8cd15f429dbda77245e282f03" dependencies = [ "derivation-path", - "ed25519-dalek", + "ed25519-dalek 2.2.0", "hmac 0.12.1", "sha2 0.10.9", ] @@ -1821,6 +1970,25 @@ dependencies = [ "byteorder 0.5.3", ] +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest 0.10.7", + "ff", + "generic-array", + "group", + "pkcs8", + "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encode_unicode" version = "1.0.0" 
@@ -1869,17 +2037,27 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "env_filter" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0" +dependencies = [ + "log", + "regex", +] + [[package]] name = "env_logger" -version = "0.9.3" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0" dependencies = [ - "atty", + "anstream", + "anstyle", + "env_filter", "humantime", "log", - "regex", - "termcolor", ] [[package]] @@ -1899,32 +2077,10 @@ dependencies = [ ] [[package]] -name = "etcd-client" -version = "0.11.1" +name = "event-listener" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4b0ea5ef6dc2388a4b1669fa32097249bc03a15417b97cb75e38afb309e4a89" -dependencies = [ - "http 0.2.12", - "prost", - "tokio", - "tokio-stream", - "tonic", - "tonic-build", - "tower 0.4.13", - "tower-service", -] - -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - -[[package]] -name = "event-listener" -version = "5.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", @@ -1937,7 +2093,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 5.3.1", + "event-listener", "pin-project-lite", ] @@ -1952,14 +2108,14 @@ dependencies = [ 
[[package]] name = "fastbloom" -version = "0.9.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27cea6e7f512d43b098939ff4d5a5d6fe3db07971e1d05176fe26c642d33f5b8" +checksum = "18c1ddb9231d8554c2d6bdf4cfaabf0c59251658c68b6c95cd52dd0c513a912a" dependencies = [ "getrandom 0.3.1", + "libm", "rand 0.9.0", "siphasher 1.0.1", - "wide", ] [[package]] @@ -1974,7 +2130,7 @@ version = "3.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "rustix 0.38.39", "windows-sys 0.48.0", ] @@ -1985,6 +2141,17 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da" +[[package]] +name = "ff" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "bitvec", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "fiat-crypto" version = "0.2.9" @@ -2081,11 +2248,11 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", ] [[package]] @@ -2100,6 +2267,12 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.1.31" @@ -2211,6 +2384,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -2241,7 +2415,7 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -2254,7 +2428,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "js-sys", "libc", "wasi 0.13.3+wasi-0.2.2", @@ -2312,7 +2486,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "dashmap", "futures 0.3.31", "futures-timer", @@ -2326,6 +2500,19 @@ dependencies = [ "spinning_top", ] +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand 0.8.5", + "rand_core 0.6.4", + "rand_xorshift", + "subtle", +] + [[package]] name = "h2" version = "0.3.26" @@ -2338,7 +2525,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.10.0", + "indexmap 2.11.4", "slab", "tokio", "tokio-util 0.7.16", @@ -2445,7 +2632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03b876ecf37e86b359573c16c8366bc3eba52b689884a0fc42ba3f67203d2a8b" dependencies = [ "cc", - "cfg-if 1.0.0", + "cfg-if 1.0.3", "libc", "pkg-config", "windows-sys 0.48.0", @@ -2557,9 +2744,9 @@ 
checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "humantime" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" @@ -2646,7 +2833,7 @@ dependencies = [ "http 1.2.0", "hyper 1.6.0", "hyper-util", - "rustls 0.23.31", + "rustls 0.23.32", "rustls-pki-types", "tokio", "tokio-rustls 0.26.2", @@ -2695,7 +2882,7 @@ dependencies = [ "hyper 1.6.0", "ipnet", "libc", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project-lite", "socket2 0.5.10", "tokio", @@ -2853,9 +3040,9 @@ dependencies = [ [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -2925,9 +3112,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", "hashbrown 0.15.1", @@ -2940,7 +3127,7 @@ version = "0.17.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4adb2ee6ad319a912210a36e56e3623555817bcc877a7e6e8802d1d69c4d8056" dependencies = [ - "console 0.16.0", + "console 0.16.1", "portable-atomic", "unicode-width 0.2.0", "unit-prefix", @@ -2953,7 +3140,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70a646d946d06bedbbc4cac4c218acf4bbf2d87757a784857025f4d447e4e1cd" dependencies = [ 
- "console 0.16.0", + "console 0.16.1", "portable-atomic", "unicode-width 0.2.0", "unit-prefix", @@ -2975,17 +3162,17 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", ] [[package]] name = "io-uring" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.0", + "bitflags 2.9.4", + "cfg-if 1.0.3", "libc", ] @@ -3005,6 +3192,12 @@ dependencies = [ "serde", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + [[package]] name = "itertools" version = "0.10.5" @@ -3036,7 +3229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" dependencies = [ "cesu8", - "cfg-if 1.0.0", + "cfg-if 1.0.3", "combine 4.6.7", "jni-sys", "log", @@ -3062,9 +3255,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "852f13bec5eba4ba9afbeb93fd7c13fe56147f055939ae21c43a29a0ecb2702e" dependencies = [ "once_cell", "wasm-bindgen", @@ -3202,13 +3395,17 @@ dependencies = [ ] [[package]] -name = "kaigan" -version = "0.2.6" +name = "k256" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba15de5aeb137f0f65aa3bf82187647f1285abfe5b20c80c2c37f7007ad519a" +checksum = 
"f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ - "borsh 0.10.3", - "serde", + "cfg-if 1.0.3", + "ecdsa", + "elliptic-curve", + "once_cell", + "sha2 0.10.9", + "signature 2.2.0", ] [[package]] @@ -3253,9 +3450,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.174" +version = "0.2.175" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" [[package]] name = "libloading" @@ -3263,7 +3460,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "winapi 0.3.9", ] @@ -3433,9 +3630,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "lru" @@ -3500,9 +3697,9 @@ dependencies = [ [[package]] name = "memmap2" -version = "0.9.7" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "483758ad303d734cec05e5c12b41d7e93e6a6390c5e9dae6bdeb7c1259012d28" +checksum = "843a98750cd611cc2965a8213b53b43e715f13c37a9e096c6408e69990961db7" dependencies = [ "libc", ] @@ -3590,7 +3787,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "downcast", "fragile", "lazy_static", @@ -3605,7 +3802,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "proc-macro2", "quote", "syn 1.0.109", @@ -3672,8 +3869,8 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.0", + "bitflags 2.9.4", + "cfg-if 1.0.3", "cfg_aliases", "libc", "memoffset 0.9.0", @@ -3859,7 +4056,7 @@ checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "crc32fast", "hashbrown 0.15.1", - "indexmap 2.10.0", + "indexmap 2.11.4", "memchr", ] @@ -3878,6 +4075,12 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" + [[package]] name = "opaque-debug" version = "0.3.0" @@ -3890,8 +4093,8 @@ version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.0", + "bitflags 2.9.4", + "cfg-if 1.0.3", "foreign-types", "libc", "once_cell", @@ -3951,12 +4154,21 @@ dependencies = [ "futures-util", "js-sys", "lazy_static", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project", "rand 0.8.5", "thiserror 1.0.69", ] +[[package]] +name = "pairing" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group", +] + [[package]] name = "parity-tokio-ipc" version = "0.9.0" @@ -4004,7 +4216,7 @@ version = "0.8.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "instant", "libc", "redox_syscall 0.2.10", @@ -4018,7 +4230,7 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "libc", "redox_syscall 0.3.5", "smallvec", @@ -4066,9 +4278,9 @@ checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "percentage" @@ -4166,6 +4378,16 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + [[package]] name = "pkg-config" version = "0.3.17" @@ -4178,7 +4400,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "cpufeatures", "opaque-debug", "universal-hash", @@ -4377,7 +4599,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e" dependencies = [ - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", ] [[package]] @@ -4408,9 +4630,9 @@ 
dependencies = [ [[package]] name = "quinn" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", "cfg_aliases", @@ -4418,9 +4640,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.31", - "socket2 0.5.10", - "thiserror 2.0.12", + "rustls 0.23.32", + "socket2 0.6.0", + "thiserror 2.0.16", "tokio", "tracing", "web-time", @@ -4428,9 +4650,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.12" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", "fastbloom", @@ -4439,11 +4661,11 @@ dependencies = [ "rand 0.9.0", "ring", "rustc-hash 2.0.0", - "rustls 0.23.31", + "rustls 0.23.32", "rustls-pki-types", "rustls-platform-verifier", "slab", - "thiserror 2.0.12", + "thiserror 2.0.16", "tinyvec", "tracing", "web-time", @@ -4471,6 +4693,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.7.3" @@ -4572,6 +4800,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "rand_xoshiro" version = "0.6.0" @@ -4587,14 +4824,14 @@ version = "11.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", ] [[package]] name = "rayon" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", @@ -4602,9 +4839,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -4661,9 +4898,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" dependencies = [ "aho-corasick 1.0.1", "memchr", @@ -4712,7 +4949,7 @@ dependencies = [ "mime", "native-tls", "once_cell", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project-lite", "rustls 0.21.12", "rustls-pemfile", @@ -4726,7 +4963,7 @@ dependencies = [ "tokio-rustls 0.24.1", "tokio-util 0.7.16", "tower-service", - "url 2.5.4", + "url 2.5.7", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4736,9 +4973,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.22" +version = "0.12.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" +checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" dependencies = [ "async-compression", "base64 0.22.1", @@ -4754,10 +4991,10 
@@ dependencies = [ "hyper-util", "js-sys", "log", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project-lite", "quinn", - "rustls 0.23.31", + "rustls 0.23.32", "rustls-pki-types", "serde", "serde_json", @@ -4769,7 +5006,7 @@ dependencies = [ "tower 0.5.2", "tower-http", "tower-service", - "url 2.5.4", + "url 2.5.7", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -4785,12 +5022,22 @@ dependencies = [ "anyhow", "async-trait", "http 1.2.0", - "reqwest 0.12.22", + "reqwest 0.12.23", "serde", "thiserror 1.0.69", "tower-service", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac 0.12.1", + "subtle", +] + [[package]] name = "ring" version = "0.17.14" @@ -4798,7 +5045,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", - "cfg-if 1.0.0", + "cfg-if 1.0.3", "getrandom 0.2.10", "libc", "untrusted", @@ -4887,7 +5134,7 @@ version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.4.14", @@ -4900,7 +5147,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "errno", "libc", "linux-raw-sys 0.9.2", @@ -4921,14 +5168,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.31" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +checksum = 
"cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.4", + "rustls-webpki 0.103.6", "subtle", "zeroize", ] @@ -4966,19 +5213,19 @@ dependencies = [ [[package]] name = "rustls-platform-verifier" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5467026f437b4cb2a533865eaa73eb840019a0916f4b9ec563c6e617e086c9" +checksum = "be59af91596cac372a6942530653ad0c3a246cdd491aaa9dcaee47f88d67d5a0" dependencies = [ "core-foundation 0.10.0", "core-foundation-sys", "jni", "log", "once_cell", - "rustls 0.23.31", + "rustls 0.23.32", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki 0.103.4", + "rustls-webpki 0.103.6", "security-framework 3.2.0", "security-framework-sys", "webpki-root-certs", @@ -5003,9 +5250,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.4" +version = "0.103.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" dependencies = [ "ring", "rustls-pki-types", @@ -5024,15 +5271,6 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1" -[[package]] -name = "safe_arch" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" -dependencies = [ - "bytemuck", -] - [[package]] name = "same-file" version = "1.0.6" @@ -5067,6 +5305,20 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + 
"base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "security-framework" version = "2.3.1" @@ -5086,7 +5338,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -5105,9 +5357,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "seqlock" @@ -5120,10 +5372,11 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" dependencies = [ + "serde_core", "serde_derive", ] @@ -5138,18 +5391,28 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.17" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" dependencies = [ "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" dependencies = [ "proc-macro2", "quote", @@ -5158,14 +5421,15 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.142" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] @@ -5182,9 +5446,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.14.0" +version = "3.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" +checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" dependencies = [ "serde", "serde_derive", @@ -5193,9 +5457,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.14.0" +version = "3.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" +checksum = "327ada00f7d64abaac1e55a6911e90cf665aa051b9a561c7006c157f4633135e" dependencies = [ "darling", "proc-macro2", @@ -5209,7 +5473,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.10.0", + "indexmap 2.11.4", "itoa", "ryu", "serde", @@ -5223,7 +5487,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if 1.0.3", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -5235,7 +5499,7 @@ version = "0.10.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "cpufeatures", "digest 0.10.7", ] @@ -5247,7 +5511,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if 1.0.3", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -5259,7 +5523,7 @@ version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "cpufeatures", "digest 0.10.7", ] @@ -5320,6 +5584,16 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65211b7b6fc3f14ff9fc7a2011a434e3e6880585bd2e9e9396315ae24cbf7852" +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core 0.6.4", +] + [[package]] name = "simpl" version = "0.1.0" @@ -5350,9 +5624,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" @@ -5413,9 +5687,9 @@ dependencies = [ [[package]] name = "solana-account" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f949fe4edaeaea78c844023bfc1c898e0b1f5a100f8a8d2d0f85d0a7b090258" +checksum = "f885ce7f937871ecb56aadbeaaec963b234a580b7d6ebbdb8fa4249a36f92433" dependencies = [ "bincode", 
"qualifier_attr", @@ -5424,7 +5698,7 @@ dependencies = [ "serde_derive", "solana-account-info", "solana-clock", - "solana-instruction", + "solana-instruction-error", "solana-pubkey", "solana-sdk-ids", "solana-sysvar", @@ -5432,7 +5706,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "3.0.0" +version = "3.1.0" dependencies = [ "Inflector", "base64 0.22.1", @@ -5446,11 +5720,11 @@ dependencies = [ "solana-account-decoder-client-types", "solana-address-lookup-table-interface", "solana-clock", - "solana-config-program-client", + "solana-config-interface", "solana-epoch-schedule", "solana-fee-calculator", "solana-instruction", - "solana-loader-v3-interface 5.0.0", + "solana-loader-v3-interface", "solana-nonce", "solana-program-option", "solana-program-pack", @@ -5467,13 +5741,13 @@ dependencies = [ "spl-token-group-interface", "spl-token-interface", "spl-token-metadata-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", "zstd", ] [[package]] name = "solana-account-decoder-client-types" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "bs58", @@ -5487,9 +5761,9 @@ dependencies = [ [[package]] name = "solana-account-info" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8f5152a288ef1912300fc6efa6c2d1f9bb55d9398eb6c72326360b8063987da" +checksum = "82f4691b69b172c687d218dd2f1f23fc7ea5e9aa79df9ac26dab3d8dd829ce48" dependencies = [ "bincode", "serde", @@ -5500,7 +5774,7 @@ dependencies = [ [[package]] name = "solana-accounts-db" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-io-uring", "ahash 0.8.11", @@ -5512,16 +5786,17 @@ dependencies = [ "bzip2", "crossbeam-channel", "dashmap", - "indexmap 2.10.0", + "indexmap 2.11.4", "io-uring", "itertools 0.12.1", "libc", "log", "lz4", - "memmap2 0.9.7", + "memmap2 0.9.8", "modular-bitfield", "num_cpus", "num_enum", + "qualifier_attr", "rand 0.8.5", "rayon", "seqlock", @@ -5537,6 +5812,7 @@ 
dependencies = [ "solana-fee-calculator", "solana-genesis-config", "solana-hash", + "solana-keypair", "solana-lattice-hash", "solana-measure", "solana-message", @@ -5544,10 +5820,12 @@ dependencies = [ "solana-nohash-hasher", "solana-pubkey", "solana-rayon-threadlimit", - "solana-rent-collector", + "solana-rent", "solana-reward-info", "solana-sha256-hasher", + "solana-signer", "solana-slot-hashes", + "solana-stake-program", "solana-svm-transaction", "solana-system-interface", "solana-sysvar", @@ -5555,18 +5833,41 @@ dependencies = [ "solana-transaction", "solana-transaction-context", "solana-transaction-error", + "solana-vote-program", "spl-generic-token", "static_assertions", "tar", "tempfile", - "thiserror 2.0.12", + "thiserror 2.0.16", +] + +[[package]] +name = "solana-address" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a7a457086457ea9db9a5199d719dc8734dc2d0342fad0d8f77633c31eb62f19" +dependencies = [ + "borsh", + "bytemuck", + "bytemuck_derive", + "curve25519-dalek 4.1.3", + "five8", + "five8_const", + "rand 0.8.5", + "serde", + "serde_derive", + "solana-atomic-u64", + "solana-define-syscall 3.0.0", + "solana-program-error", + "solana-sanitize", + "solana-sha256-hasher", ] [[package]] name = "solana-address-lookup-table-interface" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1673f67efe870b64a65cb39e6194be5b26527691ce5922909939961a6e6b395" +checksum = "e2f56cac5e70517a2f27d05e5100b20de7182473ffd0035b23ea273307905987" dependencies = [ "bincode", "bytemuck", @@ -5574,6 +5875,7 @@ dependencies = [ "serde_derive", "solana-clock", "solana-instruction", + "solana-instruction-error", "solana-pubkey", "solana-sdk-ids", "solana-slot-hashes", @@ -5581,18 +5883,18 @@ dependencies = [ [[package]] name = "solana-atomic-u64" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d52e52720efe60465b052b9e7445a01c17550666beec855cce66f44766697bc2" +checksum = "a933ff1e50aff72d02173cfcd7511bd8540b027ee720b75f353f594f834216d0" dependencies = [ "parking_lot 0.12.2", ] [[package]] name = "solana-banks-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ - "borsh 1.5.7", + "borsh", "futures 0.3.31", "solana-account", "solana-banks-interface", @@ -5609,14 +5911,14 @@ dependencies = [ "solana-transaction-context", "solana-transaction-error", "tarpc", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-serde", ] [[package]] name = "solana-banks-interface" -version = "3.0.0" +version = "3.1.0" dependencies = [ "serde", "serde_derive", @@ -5635,7 +5937,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bincode", @@ -5663,41 +5965,40 @@ dependencies = [ [[package]] name = "solana-big-mod-exp" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75db7f2bbac3e62cfd139065d15bcda9e2428883ba61fc8d27ccb251081e7567" +checksum = "30c80fb6d791b3925d5ec4bf23a7c169ef5090c013059ec3ed7d0b2c04efa085" dependencies = [ "num-bigint 0.4.6", "num-traits", - "solana-define-syscall", + "solana-define-syscall 3.0.0", ] [[package]] name = "solana-bincode" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19a3787b8cf9c9fe3dd360800e8b70982b9e5a8af9e11c354b6665dd4a003adc" +checksum = "534a37aecd21986089224d0c01006a75b96ac6fb2f418c24edc15baf0d2a4c99" dependencies = [ "bincode", "serde", - "solana-instruction", + "solana-instruction-error", ] [[package]] name = "solana-blake3-hasher" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0801e25a1b31a14494fc80882a036be0ffd290efc4c2d640bfcca120a4672" +checksum = "ffa2e3bdac3339c6d0423275e45dafc5ac25f4d43bf344d026a3cc9a85e244a6" 
dependencies = [ "blake3", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-hash", - "solana-sanitize", ] [[package]] name = "solana-bloom" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bv", "fnv", @@ -5708,34 +6009,57 @@ dependencies = [ "solana-time-utils", ] +[[package]] +name = "solana-bls-signatures" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a40ce56d14f58c3ebe9275c3739c4052748ec5c4922854c12dc823dbf450ebd1" +dependencies = [ + "base64 0.22.1", + "blst", + "blstrs", + "bytemuck", + "cfg_eval", + "ff", + "group", + "pairing", + "rand 0.8.5", + "serde", + "serde_json", + "serde_with", + "solana-signature", + "solana-signer", + "subtle", + "thiserror 2.0.16", +] + [[package]] name = "solana-bn254" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4420f125118732833f36facf96a27e7b78314b2d642ba07fa9ffdacd8d79e243" +checksum = "20a5f01e99addb316d95d4ed31aa6eacfda557fffc00ae316b919e8ba0fc5b91" dependencies = [ "ark-bn254", "ark-ec", "ark-ff", "ark-serialize", "bytemuck", - "solana-define-syscall", - "thiserror 2.0.12", + "solana-define-syscall 3.0.0", + "thiserror 2.0.16", ] [[package]] name = "solana-borsh" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718333bcd0a1a7aed6655aa66bef8d7fb047944922b2d3a18f49cbc13e73d004" +checksum = "dc402b16657abbfa9991cd5cbfac5a11d809f7e7d28d3bb291baeb088b39060e" dependencies = [ - "borsh 0.10.3", - "borsh 1.5.7", + "borsh", ] [[package]] name = "solana-bpf-loader-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-syscalls", "bincode", @@ -5744,10 +6068,8 @@ dependencies = [ "solana-bincode", "solana-clock", "solana-instruction", - "solana-loader-v3-interface 5.0.0", + "solana-loader-v3-interface", "solana-loader-v4-interface", - "solana-log-collector", - "solana-measure", "solana-packet", 
"solana-program-entrypoint", "solana-program-runtime", @@ -5755,19 +6077,21 @@ dependencies = [ "solana-sbpf", "solana-sdk-ids", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-type-overrides", "solana-system-interface", "solana-transaction-context", - "solana-type-overrides", ] [[package]] name = "solana-bucket-map" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bv", "bytemuck", "bytemuck_derive", - "memmap2 0.9.7", + "memmap2 0.9.8", "modular-bitfield", "num_enum", "rand 0.8.5", @@ -5779,7 +6103,7 @@ dependencies = [ [[package]] name = "solana-builtins" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "solana-bpf-loader-program", @@ -5798,7 +6122,7 @@ dependencies = [ [[package]] name = "solana-builtins-default-costs" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "ahash 0.8.11", @@ -5815,7 +6139,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "chrono", "clap", @@ -5834,15 +6158,15 @@ dependencies = [ "solana-seed-phrase", "solana-signature", "solana-signer", - "thiserror 2.0.12", + "thiserror 2.0.16", "tiny-bip39", "uriparse", - "url 2.5.4", + "url 2.5.7", ] [[package]] name = "solana-cli-config" -version = "3.0.0" +version = "3.1.0" dependencies = [ "dirs-next", "serde", @@ -5850,19 +6174,19 @@ dependencies = [ "serde_yaml", "solana-clap-utils", "solana-commitment-config", - "url 2.5.4", + "url 2.5.7", ] [[package]] name = "solana-cli-output" -version = "3.0.0" +version = "3.1.0" dependencies = [ "Inflector", "agave-reserved-account-keys", "base64 0.22.1", "chrono", "clap", - "console 0.16.0", + "console 0.16.1", "humantime", "indicatif 0.18.0", "pretty-hex", @@ -5878,7 +6202,6 @@ dependencies = [ "solana-epoch-info", "solana-hash", "solana-message", - "solana-native-token", "solana-packet", "solana-pubkey", "solana-rpc-client-api", @@ -5886,7 +6209,6 @@ dependencies = [ 
"solana-signature", "solana-stake-interface", "solana-system-interface", - "solana-sysvar", "solana-transaction", "solana-transaction-error", "solana-transaction-status", @@ -5897,14 +6219,14 @@ dependencies = [ [[package]] name = "solana-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "bincode", "dashmap", "futures 0.3.31", "futures-util", - "indexmap 2.10.0", + "indexmap 2.11.4", "indicatif 0.18.0", "log", "quinn", @@ -5919,6 +6241,7 @@ dependencies = [ "solana-keypair", "solana-measure", "solana-message", + "solana-net-utils", "solana-pubkey", "solana-pubsub-client", "solana-quic-client", @@ -5935,15 +6258,16 @@ dependencies = [ "solana-transaction-error", "solana-transaction-status-client-types", "solana-udp-client", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", + "tokio-util 0.7.16", ] [[package]] name = "solana-client-traits" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83f0071874e629f29e0eb3dab8a863e98502ac7aba55b7e0df1803fc5cac72a7" +checksum = "08618ed587e128105510c54ae3e456b9a06d674d8640db75afe66dad65cb4e02" dependencies = [ "solana-account", "solana-commitment-config", @@ -5962,9 +6286,9 @@ dependencies = [ [[package]] name = "solana-clock" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bb482ab70fced82ad3d7d3d87be33d466a3498eb8aa856434ff3c0dfc2e2e31" +checksum = "fb62e9381182459a4520b5fe7fb22d423cae736239a6427fc398a88743d0ed59" dependencies = [ "serde", "serde_derive", @@ -5975,9 +6299,9 @@ dependencies = [ [[package]] name = "solana-cluster-type" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ace9fea2daa28354d107ea879cff107181d85cd4e0f78a2bedb10e1a428c97e" +checksum = "eb7692fa6bf10a1a86b450c4775526f56d7e0e2116a53313f2533b5694abea64" dependencies = [ "serde", "serde_derive", @@ -5986,9 +6310,9 @@ dependencies 
= [ [[package]] name = "solana-commitment-config" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac49c4dde3edfa832de1697e9bcdb7c3b3f7cb7a1981b7c62526c8bb6700fb73" +checksum = "5fa5933a62dadb7d3ed35e6329de5cebb0678acc8f9cfdf413269084eeccc63f" dependencies = [ "serde", "serde_derive", @@ -5996,7 +6320,7 @@ dependencies = [ [[package]] name = "solana-compute-budget" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-fee-structure", "solana-program-runtime", @@ -6004,7 +6328,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-instruction" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "log", @@ -6018,32 +6342,32 @@ dependencies = [ "solana-sdk-ids", "solana-svm-transaction", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-compute-budget-interface" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8432d2c4c22d0499aa06d62e4f7e333f81777b3d7c96050ae9e5cb71a8c3aee4" +checksum = "8292c436b269ad23cecc8b24f7da3ab07ca111661e25e00ce0e1d22771951ab9" dependencies = [ - "borsh 1.5.7", + "borsh", "solana-instruction", "solana-sdk-ids", ] [[package]] name = "solana-compute-budget-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-program-runtime", ] [[package]] name = "solana-config-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fbdbcfedb467322ac9686ca61da0a1fdede2fd99a01fb2ed52b49452abd22e0" +checksum = "63e401ae56aed512821cc7a0adaa412ff97fecd2dff4602be7b1330d2daec0c4" dependencies = [ "bincode", "serde", @@ -6056,29 +6380,15 @@ dependencies = [ "solana-system-interface", ] -[[package]] -name = "solana-config-program-client" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ef9867b9ffae6e48a97ce6349e7796fcb34084298e909a8fa1fe427f41b52fd4" -dependencies = [ - "bincode", - "borsh 0.10.3", - "kaigan", - "serde", - "solana-config-interface", - "solana-program", -] - [[package]] name = "solana-connection-cache" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.10.0", + "indexmap 2.11.4", "log", "rand 0.8.5", "rayon", @@ -6087,18 +6397,19 @@ dependencies = [ "solana-metrics", "solana-time-utils", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-core" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-banking-stage-ingress-types", "agave-feature-set", "agave-transaction-view", "agave-verified-packet-receiver", + "agave-votor", "ahash 0.8.11", "anyhow", "arrayvec", @@ -6107,13 +6418,13 @@ dependencies = [ "base64 0.22.1", "bincode", "bs58", + "bytemuck", "bytes", "chrono", "conditional-mod", "crossbeam-channel", "dashmap", "derive_more 1.0.0", - "etcd-client", "futures 0.3.31", "histogram", "itertools 0.12.1", @@ -6129,7 +6440,7 @@ dependencies = [ "rand_chacha 0.3.1", "rayon", "rolling-file", - "rustls 0.23.31", + "rustls 0.23.32", "serde", "serde_bytes", "serde_derive", @@ -6142,6 +6453,7 @@ dependencies = [ "solana-builtins-default-costs", "solana-client", "solana-clock", + "solana-cluster-type", "solana-compute-budget", "solana-compute-budget-instruction", "solana-compute-budget-interface", @@ -6160,7 +6472,7 @@ dependencies = [ "solana-instruction", "solana-keypair", "solana-ledger", - "solana-loader-v3-interface 5.0.0", + "solana-loader-v3-interface", "solana-measure", "solana-message", "solana-metrics", @@ -6193,12 +6505,12 @@ dependencies = [ "solana-slot-history", "solana-streamer", "solana-svm", + "solana-svm-timings", "solana-svm-transaction", "solana-system-interface", "solana-system-transaction", "solana-sysvar", "solana-time-utils", - "solana-timings", "solana-tls-utils", 
"solana-tpu-client", "solana-tpu-client-next", @@ -6218,7 +6530,7 @@ dependencies = [ "sys-info", "sysctl", "tempfile", - "thiserror 2.0.12", + "thiserror 2.0.16", "tikv-jemallocator", "tokio", "tokio-util 0.7.16", @@ -6227,7 +6539,7 @@ dependencies = [ [[package]] name = "solana-cost-model" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "ahash 0.8.11", @@ -6253,12 +6565,12 @@ dependencies = [ [[package]] name = "solana-cpi" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dc71126edddc2ba014622fc32d0f5e2e78ec6c5a1e0eb511b85618c09e9ea11" +checksum = "16238feb63d1cbdf915fb287f29ef7a7ebf81469bd6214f8b72a53866b593f8f" dependencies = [ "solana-account-info", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-instruction", "solana-program-error", "solana-pubkey", @@ -6267,50 +6579,47 @@ dependencies = [ [[package]] name = "solana-curve25519" -version = "2.2.15" +version = "2.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def3cfe5279edb64fc39111cff6dcf77b01fbfba2c02c13ced41e6a48baf4cbe" +checksum = "b162f50499b391b785d57b2f2c73e3b9754d88fd4894bef444960b00bda8dcca" dependencies = [ "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", - "solana-define-syscall", + "solana-define-syscall 2.3.0", "subtle", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-curve25519" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "subtle", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] -name = "solana-decode-error" -version = "2.2.1" +name = "solana-define-syscall" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a6a6383af236708048f8bd8d03db8ca4ff7baf4a48e5d580f4cce545925470" -dependencies = [ - "num-traits", -] +checksum = 
"2ae3e2abcf541c8122eafe9a625d4d194b4023c20adde1e251f94e056bb1aee2" [[package]] name = "solana-define-syscall" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ae3e2abcf541c8122eafe9a625d4d194b4023c20adde1e251f94e056bb1aee2" +checksum = "f9697086a4e102d28a156b8d6b521730335d6951bd39a5e766512bbe09007cee" [[package]] name = "solana-derivation-path" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "939756d798b25c5ec3cca10e06212bdca3b1443cb9bb740a38124f58b258737b" +checksum = "ff71743072690fdbdfcdc37700ae1cb77485aaad49019473a81aee099b1e0b8c" dependencies = [ "derivation-path", "qstring", @@ -6319,7 +6628,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "solana-clock", @@ -6330,22 +6639,19 @@ dependencies = [ [[package]] name = "solana-ed25519-program" -version = "2.2.3" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feafa1691ea3ae588f99056f4bdd1293212c7ece28243d7da257c443e84753" +checksum = "e1419197f1c06abf760043f6d64ba9d79a03ad5a43f18c7586471937122094da" dependencies = [ "bytemuck", "bytemuck_derive", - "ed25519-dalek", - "solana-feature-set", "solana-instruction", - "solana-precompile-error", "solana-sdk-ids", ] [[package]] name = "solana-entry" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "crossbeam-channel", @@ -6369,9 +6675,9 @@ dependencies = [ [[package]] name = "solana-epoch-info" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ef6f0b449290b0b9f32973eefd95af35b01c5c0c34c569f936c34c5b20d77b" +checksum = "f8a6b69bd71386f61344f2bcf0f527f5fd6dd3b22add5880e2e1bf1dd1fa8059" dependencies = [ "serde", "serde_derive", @@ -6379,9 +6685,9 @@ dependencies = [ [[package]] name = "solana-epoch-rewards" -version = "2.2.1" 
+version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b575d3dd323b9ea10bb6fe89bf6bf93e249b215ba8ed7f68f1a3633f384db7" +checksum = "b319a4ed70390af911090c020571f0ff1f4ec432522d05ab89f5c08080381995" dependencies = [ "serde", "serde_derive", @@ -6393,9 +6699,9 @@ dependencies = [ [[package]] name = "solana-epoch-rewards-hasher" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c5fd2662ae7574810904585fd443545ed2b568dbd304b25a31e79ccc76e81b" +checksum = "e507099d0c2c5d7870c9b1848281ea67bbeee80d171ca85003ee5767994c9c38" dependencies = [ "siphasher 0.3.11", "solana-hash", @@ -6404,9 +6710,9 @@ dependencies = [ [[package]] name = "solana-epoch-schedule" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fce071fbddecc55d727b1d7ed16a629afe4f6e4c217bc8d00af3b785f6f67ed" +checksum = "6e5481e72cc4d52c169db73e4c0cd16de8bc943078aac587ec4817a75cc6388f" dependencies = [ "serde", "serde_derive", @@ -6415,11 +6721,21 @@ dependencies = [ "solana-sysvar-id", ] +[[package]] +name = "solana-epoch-stake" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc6693d0ea833b880514b9b88d95afb80b42762dca98b0712465d1fcbbcb89e" +dependencies = [ + "solana-define-syscall 3.0.0", + "solana-pubkey", +] + [[package]] name = "solana-example-mocks" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84461d56cbb8bb8d539347151e0525b53910102e4bced875d49d5139708e39d3" +checksum = "978855d164845c1b0235d4b4d101cadc55373fffaf0b5b6cfa2194d25b2ed658" dependencies = [ "serde", "serde_derive", @@ -6433,12 +6749,12 @@ dependencies = [ "solana-pubkey", "solana-sdk-ids", "solana-system-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-faucet" -version = "3.0.0" +version = "3.1.0" dependencies = [ 
"bincode", "clap", @@ -6448,13 +6764,14 @@ dependencies = [ "serde_derive", "solana-clap-utils", "solana-cli-config", + "solana-cli-output", "solana-hash", "solana-instruction", "solana-keypair", "solana-logger", "solana-message", "solana-metrics", - "solana-native-token", + "solana-net-utils", "solana-packet", "solana-pubkey", "solana-signer", @@ -6463,15 +6780,15 @@ dependencies = [ "solana-transaction", "solana-version", "spl-memo-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-feature-gate-interface" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f5c5382b449e8e4e3016fb05e418c53d57782d8b5c30aa372fc265654b956d" +checksum = "7347ab62e6d47a82e340c865133795b394feea7c2b2771d293f57691c6544c3f" dependencies = [ "bincode", "serde", @@ -6486,23 +6803,9 @@ dependencies = [ "solana-system-interface", ] -[[package]] -name = "solana-feature-set" -version = "2.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92f6c09cc41059c0e03ccbee7f5d4cc0a315d68ef0d59b67eb90246adfd8cc35" -dependencies = [ - "ahash 0.8.11", - "lazy_static", - "solana-epoch-schedule", - "solana-hash", - "solana-pubkey", - "solana-sha256-hasher", -] - [[package]] name = "solana-fee" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "solana-fee-structure", @@ -6511,9 +6814,9 @@ dependencies = [ [[package]] name = "solana-fee-calculator" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89bc408da0fb3812bc3008189d148b4d3e08252c79ad810b245482a3f70cd8d" +checksum = "2a73cc03ca4bed871ca174558108835f8323e85917bb38b9c81c7af2ab853efe" dependencies = [ "log", "serde", @@ -6522,21 +6825,19 @@ dependencies = [ [[package]] name = "solana-fee-structure" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"33adf673581c38e810bf618f745bf31b683a0a4a4377682e6aaac5d9a058dd4e" +checksum = "5e2abdb1223eea8ec64136f39cb1ffcf257e00f915c957c35c0dd9e3f4e700b0" dependencies = [ "serde", "serde_derive", - "solana-message", - "solana-native-token", ] [[package]] name = "solana-file-download" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a9744774fdbd7ae8575e5bd6d5df6946f321fb9b6019410b300a515369a37d" +checksum = "842227f0ae5ebffdfe686597a909cb406d2bd9b92432c516503b8cbd490a3ea6" dependencies = [ "console 0.15.11", "indicatif 0.17.12", @@ -6546,9 +6847,9 @@ dependencies = [ [[package]] name = "solana-genesis-config" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3725085d47b96d37fef07a29d78d2787fc89a0b9004c66eed7753d1e554989f" +checksum = "749eccc960e85c9b33608450093d256006253e1cb436b8380e71777840a3f675" dependencies = [ "bincode", "chrono", @@ -6563,7 +6864,6 @@ dependencies = [ "solana-hash", "solana-inflation", "solana-keypair", - "solana-logger", "solana-poh-config", "solana-pubkey", "solana-rent", @@ -6576,7 +6876,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "solana-accounts-db", @@ -6588,7 +6888,7 @@ dependencies = [ [[package]] name = "solana-geyser-plugin-manager" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-geyser-plugin-interface", "bs58", @@ -6612,15 +6912,16 @@ dependencies = [ "solana-signature", "solana-transaction", "solana-transaction-status", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-gossip" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", + "agave-low-pass-filter", "arrayvec", "assert_matches", "bincode", @@ -6628,7 +6929,7 @@ dependencies = [ "clap", "crossbeam-channel", "flate2", - "indexmap 2.10.0", + "indexmap 2.11.4", "itertools 0.12.1", "log", "lru", @@ 
-6641,10 +6942,12 @@ dependencies = [ "serde_bytes", "serde_derive", "siphasher 1.0.1", + "solana-account", "solana-bloom", "solana-clap-utils", "solana-client", "solana-clock", + "solana-cluster-type", "solana-connection-cache", "solana-entry", "solana-epoch-schedule", @@ -6677,14 +6980,14 @@ dependencies = [ "solana-vote", "solana-vote-program", "static_assertions", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-hard-forks" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c28371f878e2ead55611d8ba1b5fb879847156d04edea13693700ad1a28baf" +checksum = "0abacc4b66ce471f135f48f22facf75cbbb0f8a252fbe2c1e0aa59d5b203f519" dependencies = [ "serde", "serde_derive", @@ -6692,27 +6995,25 @@ dependencies = [ [[package]] name = "solana-hash" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b96e9f0300fa287b545613f007dfe20043d7812bee255f418c1eb649c93b63" +checksum = "8a063723b9e84c14d8c0d2cdf0268207dc7adecf546e31251f9e07c7b00b566c" dependencies = [ - "borsh 1.5.7", + "borsh", "bytemuck", "bytemuck_derive", "five8", - "js-sys", "serde", "serde_derive", "solana-atomic-u64", "solana-sanitize", - "wasm-bindgen", ] [[package]] name = "solana-inflation" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23eef6a09eb8e568ce6839573e4966850e85e9ce71e6ae1a6c930c1c43947de3" +checksum = "e92f37a14e7c660628752833250dd3dcd8e95309876aee751d7f8769a27947c6" dependencies = [ "serde", "serde_derive", @@ -6720,31 +7021,41 @@ dependencies = [ [[package]] name = "solana-instruction" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47298e2ce82876b64f71e9d13a46bc4b9056194e7f9937ad3084385befa50885" +checksum = "8df4e8fcba01d7efa647ed20a081c234475df5e11a93acb4393cc2c9a7b99bab" dependencies = [ "bincode", - "borsh 
1.5.7", - "getrandom 0.2.10", - "js-sys", - "num-traits", + "borsh", "serde", "serde_derive", - "solana-define-syscall", + "solana-define-syscall 3.0.0", + "solana-instruction-error", "solana-pubkey", - "wasm-bindgen", +] + +[[package]] +name = "solana-instruction-error" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f0d483b8ae387178d9210e0575b666b05cdd4bd0f2f188128249f6e454d39d" +dependencies = [ + "num-traits", + "serde", + "serde_derive", + "solana-program-error", ] [[package]] name = "solana-instructions-sysvar" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0e85a6fad5c2d0c4f5b91d34b8ca47118fc593af706e523cdbedf846a954f57" +checksum = "7ddf67876c541aa1e21ee1acae35c95c6fbc61119814bfef70579317a5e26955" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "solana-account-info", "solana-instruction", + "solana-instruction-error", "solana-program-error", "solana-pubkey", "solana-sanitize", @@ -6755,40 +7066,38 @@ dependencies = [ [[package]] name = "solana-keccak-hasher" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7aeb957fbd42a451b99235df4942d96db7ef678e8d5061ef34c9b34cae12f79" +checksum = "57eebd3012946913c8c1b8b43cdf8a6249edb09c0b6be3604ae910332a3acd97" dependencies = [ "sha3", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-hash", - "solana-sanitize", ] [[package]] name = "solana-keypair" -version = "2.2.1" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dbb7042c2e0c561afa07242b2099d55c57bd1b1da3b6476932197d84e15e3e4" +checksum = "952ed9074c12edd2060cb09c2a8c664303f4ab7f7056a407ac37dd1da7bdaa3e" dependencies = [ - "bs58", - "ed25519-dalek", + "ed25519-dalek 2.2.0", "ed25519-dalek-bip32", - "rand 0.7.3", + "five8", + "rand 0.8.5", "solana-derivation-path", "solana-pubkey", "solana-seed-derivable", 
"solana-seed-phrase", "solana-signature", "solana-signer", - "wasm-bindgen", ] [[package]] name = "solana-last-restart-slot" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a6360ac2fdc72e7463565cd256eedcf10d7ef0c28a1249d261ec168c1b55cdd" +checksum = "dcda154ec827f5fc1e4da0af3417951b7e9b8157540f81f936c4a8b1156134d0" dependencies = [ "serde", "serde_derive", @@ -6799,7 +7108,7 @@ dependencies = [ [[package]] name = "solana-lattice-hash" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "blake3", @@ -6809,17 +7118,19 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-reserved-account-keys", "anyhow", "assert_matches", "bincode", - "bitflags 2.9.1", + "bitflags 2.9.4", + "bytes", "bzip2", "chrono", "chrono-humanize", + "conditional-mod", "crossbeam-channel", "dashmap", "eager", @@ -6862,6 +7173,7 @@ dependencies = [ "solana-metrics", "solana-native-token", "solana-net-utils", + "solana-nohash-hasher", "solana-packet", "solana-perf", "solana-program-runtime", @@ -6880,11 +7192,11 @@ dependencies = [ "solana-storage-proto", "solana-streamer", "solana-svm", + "solana-svm-timings", "solana-svm-transaction", "solana-system-interface", "solana-system-transaction", "solana-time-utils", - "solana-timings", "solana-transaction", "solana-transaction-context", "solana-transaction-error", @@ -6896,7 +7208,7 @@ dependencies = [ "strum_macros", "tar", "tempfile", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-stream", "trees", @@ -6904,23 +7216,9 @@ dependencies = [ [[package]] name = "solana-loader-v2-interface" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8ab08006dad78ae7cd30df8eea0539e207d08d91eaefb3e1d49a446e1c49654" -dependencies = [ - "serde", - "serde_bytes", - "serde_derive", - "solana-instruction", - "solana-pubkey", - 
"solana-sdk-ids", -] - -[[package]] -name = "solana-loader-v3-interface" version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4be76cfa9afd84ca2f35ebc09f0da0f0092935ccdac0595d98447f259538c2" +checksum = "1e4a6f0ad4fd9c30679bfee2ce3ea6a449cac38049f210480b751f65676dfe82" dependencies = [ "serde", "serde_bytes", @@ -6928,14 +7226,13 @@ dependencies = [ "solana-instruction", "solana-pubkey", "solana-sdk-ids", - "solana-system-interface", ] [[package]] name = "solana-loader-v3-interface" -version = "5.0.0" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f7162a05b8b0773156b443bccd674ea78bb9aa406325b467ea78c06c99a63a2" +checksum = "dee44c9b1328c5c712c68966fb8de07b47f3e7bac006e74ddd1bb053d3e46e5d" dependencies = [ "serde", "serde_bytes", @@ -6948,9 +7245,9 @@ dependencies = [ [[package]] name = "solana-loader-v4-interface" -version = "2.2.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "706a777242f1f39a83e2a96a2a6cb034cb41169c6ecbee2cf09cb873d9659e7e" +checksum = "e4c948b33ff81fa89699911b207059e493defdba9647eaf18f23abdf3674e0fb" dependencies = [ "serde", "serde_bytes", @@ -6963,7 +7260,7 @@ dependencies = [ [[package]] name = "solana-loader-v4-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "qualifier_attr", @@ -6971,31 +7268,24 @@ dependencies = [ "solana-bincode", "solana-bpf-loader-program", "solana-instruction", - "solana-loader-v3-interface 5.0.0", + "solana-loader-v3-interface", "solana-loader-v4-interface", - "solana-log-collector", - "solana-measure", "solana-packet", "solana-program-runtime", "solana-pubkey", "solana-sbpf", "solana-sdk-ids", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-type-overrides", "solana-transaction-context", - "solana-type-overrides", -] - -[[package]] -name = "solana-log-collector" -version = "3.0.0" -dependencies = [ - "log", ] [[package]] name = 
"solana-logger" -version = "2.3.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8e777ec1afd733939b532a42492d888ec7c88d8b4127a5d867eb45c6eb5cd5" +checksum = "ef7421d1092680d72065edbf5c7605856719b021bf5f173656c71febcdd5d003" dependencies = [ "env_logger", "lazy_static", @@ -7006,11 +7296,11 @@ dependencies = [ [[package]] name = "solana-measure" -version = "3.0.0" +version = "3.1.0" [[package]] name = "solana-merkle-tree" -version = "3.0.0" +version = "3.1.0" dependencies = [ "fast-math", "solana-hash", @@ -7019,63 +7309,62 @@ dependencies = [ [[package]] name = "solana-message" -version = "2.4.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1796aabce376ff74bf89b78d268fa5e683d7d7a96a0a4e4813ec34de49d5314b" +checksum = "85666605c9fd727f865ed381665db0a8fc29f984a030ecc1e40f43bfb2541623" dependencies = [ "bincode", "blake3", "lazy_static", "serde", "serde_derive", - "solana-bincode", + "solana-address", "solana-hash", "solana-instruction", - "solana-pubkey", "solana-sanitize", "solana-sdk-ids", "solana-short-vec", - "solana-system-interface", "solana-transaction-error", - "wasm-bindgen", ] [[package]] name = "solana-metrics" -version = "3.0.0" +version = "3.1.0" dependencies = [ "crossbeam-channel", "gethostname", "log", - "reqwest 0.12.22", + "reqwest 0.12.23", "solana-cluster-type", "solana-sha256-hasher", "solana-time-utils", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-msg" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36a1a14399afaabc2781a1db09cb14ee4cc4ee5c7a5a3cfcc601811379a8092" +checksum = "264275c556ea7e22b9d3f87d56305546a38d4eee8ec884f3b126236cb7dcbbb4" dependencies = [ - "solana-define-syscall", + "solana-define-syscall 3.0.0", ] [[package]] name = "solana-native-token" -version = "2.2.2" +version = "3.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "307fb2f78060995979e9b4f68f833623565ed4e55d3725f100454ce78a99a1a3" +checksum = "ae8dd4c280dca9d046139eb5b7a5ac9ad10403fbd64964c7d7571214950d758f" [[package]] name = "solana-net-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "anyhow", "bincode", "bytes", + "cfg-if 1.0.3", + "dashmap", "itertools 0.12.1", "log", "nix", @@ -7084,8 +7373,9 @@ dependencies = [ "serde_derive", "socket2 0.6.0", "solana-serde", + "solana-svm-type-overrides", "tokio", - "url 2.5.4", + "url 2.5.7", ] [[package]] @@ -7096,9 +7386,9 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" [[package]] name = "solana-nonce" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703e22eb185537e06204a5bd9d509b948f0066f2d1d814a6f475dafb3ddf1325" +checksum = "abbdc6c8caf1c08db9f36a50967539d0f72b9f1d4aea04fec5430f532e5afadc" dependencies = [ "serde", "serde_derive", @@ -7110,9 +7400,9 @@ dependencies = [ [[package]] name = "solana-nonce-account" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde971a20b8dbf60144d6a84439dda86b5466e00e2843091fe731083cda614da" +checksum = "805fd25b29e5a1a0e6c3dd6320c9da80f275fbe4ff6e392617c303a2085c435e" dependencies = [ "solana-account", "solana-hash", @@ -7122,9 +7412,9 @@ dependencies = [ [[package]] name = "solana-offchain-message" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b526398ade5dea37f1f147ce55dae49aa017a5d7326606359b0445ca8d946581" +checksum = "f6e2a1141a673f72a05cf406b99e4b2b8a457792b7c01afa07b3f00d4e2de393" dependencies = [ "num_enum", "solana-hash", @@ -7137,12 +7427,12 @@ dependencies = [ [[package]] name = "solana-packet" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"004f2d2daf407b3ec1a1ca5ec34b3ccdfd6866dd2d3c7d0715004a96e4b6d127" +checksum = "6edf2f25743c95229ac0fdc32f8f5893ef738dbf332c669e9861d33ddb0f469d" dependencies = [ "bincode", - "bitflags 2.9.1", + "bitflags 2.9.4", "cfg_eval", "serde", "serde_derive", @@ -7151,7 +7441,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "3.0.0" +version = "3.1.0" dependencies = [ "ahash 0.8.11", "bincode", @@ -7181,7 +7471,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "3.0.0" +version = "3.1.0" dependencies = [ "arc-swap", "core_affinity", @@ -7199,14 +7489,14 @@ dependencies = [ "solana-runtime", "solana-time-utils", "solana-transaction", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-poh-config" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d650c3b4b9060082ac6b0efbbb66865089c58405bfb45de449f3f2b91eccee75" +checksum = "2f1fef1f2ff2480fdbcc64bef5e3c47bec6e1647270db88b43f23e3a55f8d9cf" dependencies = [ "serde", "serde_derive", @@ -7214,29 +7504,28 @@ dependencies = [ [[package]] name = "solana-poseidon" -version = "3.0.0" +version = "3.1.0" dependencies = [ "ark-bn254", "light-poseidon", - "solana-define-syscall", - "thiserror 2.0.12", + "solana-define-syscall 3.0.0", + "thiserror 2.0.16", ] [[package]] name = "solana-precompile-error" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d87b2c1f5de77dfe2b175ee8dd318d196aaca4d0f66f02842f80c852811f9f8" +checksum = "cafcd950de74c6c39d55dc8ca108bbb007799842ab370ef26cf45a34453c31e1" dependencies = [ "num-traits", - "solana-decode-error", ] [[package]] name = "solana-presigner" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a57a24e6a4125fc69510b6774cd93402b943191b6cddad05de7281491c90fe" +checksum = "0f704eaf825be3180832445b9e4983b875340696e8e7239bf2d535b0f86c14a2" dependencies = [ 
"solana-pubkey", "solana-signature", @@ -7245,57 +7534,31 @@ dependencies = [ [[package]] name = "solana-program" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "586469467e93ceb79048f8d8e3a619bf61d05396ee7de95cb40280301a589d05" +checksum = "91b12305dd81045d705f427acd0435a2e46444b65367d7179d7bdcfc3bc5f5eb" dependencies = [ - "bincode", - "blake3", - "borsh 0.10.3", - "borsh 1.5.7", - "bs58", - "bytemuck", - "console_error_panic_hook", - "console_log", - "getrandom 0.2.10", - "lazy_static", - "log", "memoffset 0.9.0", - "num-bigint 0.4.6", - "num-derive", - "num-traits", - "rand 0.8.5", - "serde", - "serde_bytes", - "serde_derive", "solana-account-info", - "solana-address-lookup-table-interface", - "solana-atomic-u64", "solana-big-mod-exp", - "solana-bincode", "solana-blake3-hasher", "solana-borsh", "solana-clock", "solana-cpi", - "solana-decode-error", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-epoch-rewards", "solana-epoch-schedule", + "solana-epoch-stake", "solana-example-mocks", - "solana-feature-gate-interface", "solana-fee-calculator", "solana-hash", "solana-instruction", + "solana-instruction-error", "solana-instructions-sysvar", "solana-keccak-hasher", "solana-last-restart-slot", - "solana-loader-v2-interface", - "solana-loader-v3-interface 3.0.0", - "solana-loader-v4-interface", - "solana-message", "solana-msg", "solana-native-token", - "solana-nonce", "solana-program-entrypoint", "solana-program-error", "solana-program-memory", @@ -7303,9 +7566,7 @@ dependencies = [ "solana-program-pack", "solana-pubkey", "solana-rent", - "solana-sanitize", "solana-sdk-ids", - "solana-sdk-macro", "solana-secp256k1-recover", "solana-serde-varint", "solana-serialize-utils", @@ -7314,22 +7575,32 @@ dependencies = [ "solana-slot-hashes", "solana-slot-history", "solana-stable-layout", - "solana-stake-interface", - "solana-system-interface", "solana-sysvar", "solana-sysvar-id", - 
"solana-vote-interface", - "thiserror 2.0.12", - "wasm-bindgen", +] + +[[package]] +name = "solana-program-binaries" +version = "3.1.0" +dependencies = [ + "bincode", + "serde", + "solana-account", + "solana-loader-v3-interface", + "solana-pubkey", + "solana-rent", + "solana-sdk-ids", + "spl-generic-token", ] [[package]] name = "solana-program-entrypoint" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ce041b1a0ed275290a5008ee1a4a6c48f5054c8a3d78d313c08958a06aedbd" +checksum = "6557cf5b5e91745d1667447438a1baa7823c6086e4ece67f8e6ebfa7a8f72660" dependencies = [ "solana-account-info", + "solana-define-syscall 3.0.0", "solana-msg", "solana-program-error", "solana-pubkey", @@ -7337,58 +7608,52 @@ dependencies = [ [[package]] name = "solana-program-error" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ee2e0217d642e2ea4bee237f37bd61bb02aec60da3647c48ff88f6556ade775" +checksum = "a1af32c995a7b692a915bb7414d5f8e838450cf7c70414e763d8abcae7b51f28" dependencies = [ - "borsh 1.5.7", - "num-traits", + "borsh", "serde", "serde_derive", - "solana-decode-error", - "solana-instruction", - "solana-msg", - "solana-pubkey", ] [[package]] name = "solana-program-memory" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0268f6c89825fb634a34bd0c3b8fdaeaecfc3728be1d622a8ee6dd577b60d4" +checksum = "10e5660c60749c7bfb30b447542529758e4dbcecd31b1e8af1fdc92e2bdde90a" dependencies = [ - "num-traits", - "solana-define-syscall", + "solana-define-syscall 3.0.0", ] [[package]] name = "solana-program-option" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc677a2e9bc616eda6dbdab834d463372b92848b2bfe4a1ed4e4b4adba3397d0" +checksum = "8e7b4ddb464f274deb4a497712664c3b612e3f5f82471d4e47710fc4ab1c3095" [[package]] name = 
"solana-program-pack" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "319f0ef15e6e12dc37c597faccb7d62525a509fec5f6975ecb9419efddeb277b" +checksum = "c169359de21f6034a63ebf96d6b380980307df17a8d371344ff04a883ec4e9d0" dependencies = [ "solana-program-error", ] [[package]] name = "solana-program-runtime" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "bincode", - "enum-iterator", "itertools 0.12.1", "log", "percentage", "rand 0.8.5", "serde", "solana-account", + "solana-account-info", "solana-clock", "solana-epoch-rewards", "solana-epoch-schedule", @@ -7396,9 +7661,7 @@ dependencies = [ "solana-hash", "solana-instruction", "solana-last-restart-slot", - "solana-log-collector", - "solana-measure", - "solana-metrics", + "solana-loader-v3-interface", "solana-program-entrypoint", "solana-pubkey", "solana-rent", @@ -7406,21 +7669,24 @@ dependencies = [ "solana-sdk-ids", "solana-slot-hashes", "solana-stable-layout", + "solana-stake-interface", "solana-svm-callback", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-timings", "solana-svm-transaction", + "solana-svm-type-overrides", "solana-system-interface", "solana-sysvar", "solana-sysvar-id", - "solana-timings", "solana-transaction-context", - "solana-type-overrides", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-program-test" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "assert_matches", @@ -7438,6 +7704,7 @@ dependencies = [ "solana-banks-interface", "solana-banks-server", "solana-clock", + "solana-cluster-type", "solana-commitment-config", "solana-compute-budget", "solana-epoch-rewards", @@ -7447,13 +7714,13 @@ dependencies = [ "solana-hash", "solana-instruction", "solana-keypair", - "solana-loader-v3-interface 5.0.0", - "solana-log-collector", + "solana-loader-v3-interface", "solana-logger", "solana-message", "solana-msg", 
"solana-native-token", "solana-poh-config", + "solana-program-binaries", "solana-program-entrypoint", "solana-program-error", "solana-program-runtime", @@ -7466,49 +7733,33 @@ dependencies = [ "solana-stable-layout", "solana-stake-interface", "solana-svm", + "solana-svm-log-collector", + "solana-svm-timings", "solana-system-interface", "solana-sysvar", "solana-sysvar-id", - "solana-timings", "solana-transaction", "solana-transaction-context", "solana-transaction-error", "solana-vote-program", "spl-generic-token", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-pubkey" -version = "2.4.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b62adb9c3261a052ca1f999398c388f1daf558a1b492f60a6d9e64857db4ff1" +checksum = "8909d399deb0851aa524420beeb5646b115fd253ef446e35fe4504c904da3941" dependencies = [ - "borsh 0.10.3", - "borsh 1.5.7", - "bytemuck", - "bytemuck_derive", - "curve25519-dalek 4.1.3", - "five8", - "five8_const", - "getrandom 0.2.10", - "js-sys", - "num-traits", "rand 0.8.5", - "serde", - "serde_derive", - "solana-atomic-u64", - "solana-decode-error", - "solana-define-syscall", - "solana-sanitize", - "solana-sha256-hasher", - "wasm-bindgen", + "solana-address", ] [[package]] name = "solana-pubsub-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "crossbeam-channel", "futures-util", @@ -7523,17 +7774,17 @@ dependencies = [ "solana-pubkey", "solana-rpc-client-types", "solana-signature", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-stream", "tokio-tungstenite", "tungstenite", - "url 2.5.4", + "url 2.5.7", ] [[package]] name = "solana-quic-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-lock", "async-trait", @@ -7542,7 +7793,7 @@ dependencies = [ "log", "quinn", "quinn-proto", - "rustls 0.23.31", + "rustls 0.23.32", "solana-connection-cache", "solana-keypair", "solana-measure", @@ -7555,22 +7806,22 @@ dependencies = [ 
"solana-streamer", "solana-tls-utils", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-quic-definitions" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7011ee2af2baad991762b6d63ea94b08d06f7928effb76ce273b232c9902c205" +checksum = "15319accf7d3afd845817aeffa6edd8cc185f135cefbc6b985df29cfd8c09609" dependencies = [ "solana-keypair", ] [[package]] name = "solana-rayon-threadlimit" -version = "3.0.0" +version = "3.1.0" dependencies = [ "log", "num_cpus", @@ -7578,9 +7829,9 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "3.0.0" +version = "3.1.0" dependencies = [ - "console 0.16.0", + "console 0.16.1", "dialoguer", "hidapi", "log", @@ -7594,45 +7845,28 @@ dependencies = [ "solana-pubkey", "solana-signature", "solana-signer", - "thiserror 2.0.12", + "thiserror 2.0.16", "uriparse", ] [[package]] name = "solana-rent" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1aea8fdea9de98ca6e8c2da5827707fb3842833521b528a713810ca685d2480" -dependencies = [ - "serde", - "serde_derive", - "solana-sdk-ids", - "solana-sdk-macro", - "solana-sysvar-id", -] - -[[package]] -name = "solana-rent-collector" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c1e19f5d5108b0d824244425e43bc78bbb9476e2199e979b0230c9f632d3bf4" -dependencies = [ - "serde", - "serde_derive", - "solana-account", - "solana-clock", - "solana-epoch-schedule", - "solana-genesis-config", - "solana-pubkey", - "solana-rent", +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b702d8c43711e3c8a9284a4f1bbc6a3de2553deb25b0c8142f9a44ef0ce5ddc1" +dependencies = [ + "serde", + "serde_derive", "solana-sdk-ids", + "solana-sdk-macro", + "solana-sysvar-id", ] [[package]] name = "solana-reward-info" -version = "2.2.1" +version = "3.0.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18205b69139b1ae0ab8f6e11cdcb627328c0814422ad2482000fa2ca54ae4a2f" +checksum = "82be7946105c2ee6be9f9ee7bd18a068b558389221d29efa92b906476102bfcc" dependencies = [ "serde", "serde_derive", @@ -7640,7 +7874,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "base64 0.22.1", @@ -7665,6 +7899,7 @@ dependencies = [ "solana-account", "solana-account-decoder", "solana-accounts-db", + "solana-cli-output", "solana-client", "solana-clock", "solana-commitment-config", @@ -7717,14 +7952,14 @@ dependencies = [ "spl-token-2022-interface", "spl-token-interface", "stream-cancel", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-util 0.7.16", ] [[package]] name = "solana-rpc-client" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "base64 0.22.1", @@ -7733,7 +7968,7 @@ dependencies = [ "futures 0.3.31", "indicatif 0.18.0", "log", - "reqwest 0.12.22", + "reqwest 0.12.23", "reqwest-middleware", "semver", "serde", @@ -7762,11 +7997,11 @@ dependencies = [ [[package]] name = "solana-rpc-client-api" -version = "3.0.0" +version = "3.1.0" dependencies = [ "anyhow", "jsonrpc-core", - "reqwest 0.12.22", + "reqwest 0.12.23", "reqwest-middleware", "serde", "serde_derive", @@ -7777,12 +8012,12 @@ dependencies = [ "solana-signer", "solana-transaction-error", "solana-transaction-status-client-types", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-rpc-client-nonce-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account", "solana-commitment-config", @@ -7792,12 +8027,12 @@ dependencies = [ "solana-pubkey", "solana-rpc-client", "solana-sdk-ids", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-rpc-client-types" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "bs58", @@ -7807,21 +8042,21 @@ dependencies = [ "serde_json", 
"solana-account", "solana-account-decoder-client-types", + "solana-address", "solana-clock", "solana-commitment-config", "solana-fee-calculator", "solana-inflation", - "solana-pubkey", "solana-transaction-error", "solana-transaction-status-client-types", "solana-version", "spl-generic-token", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-runtime" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-precompiles", @@ -7846,7 +8081,7 @@ dependencies = [ "libc", "log", "lz4", - "memmap2 0.9.7", + "memmap2 0.9.8", "mockall", "modular-bitfield", "num-derive", @@ -7858,6 +8093,7 @@ dependencies = [ "rand 0.8.5", "rayon", "regex", + "semver", "serde", "serde_derive", "serde_json", @@ -7866,11 +8102,13 @@ dependencies = [ "solana-account-info", "solana-accounts-db", "solana-address-lookup-table-interface", + "solana-bls-signatures", "solana-bpf-loader-program", "solana-bucket-map", "solana-builtins", "solana-client-traits", "solana-clock", + "solana-cluster-type", "solana-commitment-config", "solana-compute-budget", "solana-compute-budget-instruction", @@ -7892,7 +8130,7 @@ dependencies = [ "solana-instruction", "solana-keypair", "solana-lattice-hash", - "solana-loader-v3-interface 5.0.0", + "solana-loader-v3-interface", "solana-loader-v4-interface", "solana-measure", "solana-message", @@ -7909,7 +8147,6 @@ dependencies = [ "solana-pubkey", "solana-rayon-threadlimit", "solana-rent", - "solana-rent-collector", "solana-reward-info", "solana-runtime-transaction", "solana-sdk-ids", @@ -7925,6 +8162,7 @@ dependencies = [ "solana-stake-program", "solana-svm", "solana-svm-callback", + "solana-svm-timings", "solana-svm-transaction", "solana-system-interface", "solana-system-program", @@ -7932,7 +8170,6 @@ dependencies = [ "solana-sysvar", "solana-sysvar-id", "solana-time-utils", - "solana-timings", "solana-transaction", "solana-transaction-context", "solana-transaction-error", @@ -7949,13 +8186,13 @@ dependencies = [ 
"symlink", "tar", "tempfile", - "thiserror 2.0.12", + "thiserror 2.0.16", "zstd", ] [[package]] name = "solana-runtime-transaction" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-transaction-view", "log", @@ -7969,25 +8206,25 @@ dependencies = [ "solana-svm-transaction", "solana-transaction", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-sanitize" -version = "2.2.1" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f1bc1357b8188d9c4a3af3fc55276e56987265eb7ad073ae6f8180ee54cecf" +checksum = "dcf09694a0fc14e5ffb18f9b7b7c0f15ecb6eac5b5610bf76a1853459d19daf9" [[package]] name = "solana-sbf-programs" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-reserved-account-keys", "agave-syscalls", "agave-validator", "bincode", - "borsh 1.5.7", + "borsh", "byteorder 1.5.0", "elf", "itertools 0.12.1", @@ -8002,6 +8239,7 @@ dependencies = [ "solana-cli-output", "solana-client-traits", "solana-clock", + "solana-cluster-type", "solana-compute-budget", "solana-compute-budget-instruction", "solana-compute-budget-interface", @@ -8013,9 +8251,8 @@ dependencies = [ "solana-instruction", "solana-keypair", "solana-ledger", - "solana-loader-v3-interface 5.0.0", + "solana-loader-v3-interface", "solana-loader-v4-interface", - "solana-log-collector", "solana-logger", "solana-measure", "solana-message", @@ -8036,22 +8273,24 @@ dependencies = [ "solana-svm", "solana-svm-callback", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-timings", "solana-svm-transaction", + "solana-svm-type-overrides", "solana-system-interface", "solana-sysvar", - "solana-timings", "solana-transaction", "solana-transaction-context", "solana-transaction-error", "solana-transaction-status", - "solana-type-overrides", "solana-vote", "solana-vote-program", + "test-case", ] [[package]] name = "solana-sbf-rust-128bit" -version = "3.0.0" +version = "3.1.0" 
dependencies = [ "solana-program-entrypoint", "solana-sbf-rust-128bit-dep", @@ -8059,11 +8298,11 @@ dependencies = [ [[package]] name = "solana-sbf-rust-128bit-dep" -version = "3.0.0" +version = "3.1.0" [[package]] name = "solana-sbf-rust-account-mem" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-program-entrypoint", @@ -8074,7 +8313,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-account-mem-deprecated" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-program", @@ -8085,7 +8324,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-alloc" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-msg", "solana-program", @@ -8095,7 +8334,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-alt-bn128" -version = "3.0.0" +version = "3.1.0" dependencies = [ "array-bytes", "solana-bn254", @@ -8105,7 +8344,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-alt-bn128-compression" -version = "3.0.0" +version = "3.1.0" dependencies = [ "array-bytes", "solana-bn254", @@ -8115,7 +8354,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-big-mod-exp" -version = "3.0.0" +version = "3.1.0" dependencies = [ "array-bytes", "serde", @@ -8128,9 +8367,9 @@ dependencies = [ [[package]] name = "solana-sbf-rust-call-args" -version = "3.0.0" +version = "3.1.0" dependencies = [ - "borsh 1.5.7", + "borsh", "solana-account-info", "solana-program", "solana-program-entrypoint", @@ -8140,7 +8379,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-call-depth" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-msg", "solana-program", @@ -8150,7 +8389,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-caller-access" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8163,16 +8402,16 @@ dependencies = [ [[package]] name = "solana-sbf-rust-curve25519" -version = "3.0.0" +version = "3.1.0" dependencies = [ - 
"solana-curve25519 3.0.0", + "solana-curve25519 3.1.0", "solana-msg", "solana-program-entrypoint", ] [[package]] name = "solana-sbf-rust-custom-heap" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8183,7 +8422,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-dep-crate" -version = "3.0.0" +version = "3.1.0" dependencies = [ "byteorder 1.5.0", "solana-program-entrypoint", @@ -8191,7 +8430,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-deprecated-loader" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8207,7 +8446,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-divide-by-zero" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-program-entrypoint", @@ -8217,7 +8456,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-dup-accounts" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8230,7 +8469,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-error-handling" -version = "3.0.0" +version = "3.1.0" dependencies = [ "num-derive", "num-traits", @@ -8244,7 +8483,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-external-spend" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-program-entrypoint", @@ -8254,7 +8493,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-get-minimum-delegation" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8266,7 +8505,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-inner_instruction_alignment_check" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8279,7 +8518,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-instruction-introspection" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8294,7 
+8533,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoke" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8313,7 +8552,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoke-and-error" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8325,7 +8564,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoke-and-ok" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8337,7 +8576,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoke-and-return" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8349,11 +8588,11 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoke-dep" -version = "3.0.0" +version = "3.1.0" [[package]] name = "solana-sbf-rust-invoked" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8369,7 +8608,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-invoked-dep" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-instruction", "solana-pubkey", @@ -8377,7 +8616,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-iter" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-program", "solana-program-entrypoint", @@ -8385,7 +8624,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-log-data" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-program", @@ -8397,7 +8636,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-many-args" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-msg", "solana-program", @@ -8407,7 +8646,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-many-args-dep" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-msg", "solana-program", @@ -8415,7 +8654,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-mem" -version = 
"3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-program-entrypoint", @@ -8427,11 +8666,11 @@ dependencies = [ [[package]] name = "solana-sbf-rust-mem-dep" -version = "3.0.0" +version = "3.1.0" [[package]] name = "solana-sbf-rust-membuiltins" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-program-entrypoint", "solana-sbf-rust-mem-dep", @@ -8439,7 +8678,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-noop" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-program-entrypoint", @@ -8449,7 +8688,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-panic" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8460,7 +8699,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-param-passing" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-program", "solana-program-entrypoint", @@ -8469,11 +8708,11 @@ dependencies = [ [[package]] name = "solana-sbf-rust-param-passing-dep" -version = "3.0.0" +version = "3.1.0" [[package]] name = "solana-sbf-rust-poseidon" -version = "3.0.0" +version = "3.1.0" dependencies = [ "array-bytes", "solana-msg", @@ -8481,9 +8720,17 @@ dependencies = [ "solana-program-entrypoint", ] +[[package]] +name = "solana-sbf-rust-r2-instruction-data-pointer" +version = "3.1.0" +dependencies = [ + "solana-cpi", + "solana-program-entrypoint", +] + [[package]] name = "solana-sbf-rust-rand" -version = "3.0.0" +version = "3.1.0" dependencies = [ "getrandom 0.2.10", "rand 0.8.5", @@ -8496,7 +8743,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-realloc" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8510,7 +8757,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-realloc-dep" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-instruction", "solana-pubkey", @@ -8518,7 +8765,7 @@ dependencies = [ [[package]] name = 
"solana-sbf-rust-realloc-invoke" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8534,11 +8781,11 @@ dependencies = [ [[package]] name = "solana-sbf-rust-realloc-invoke-dep" -version = "3.0.0" +version = "3.1.0" [[package]] name = "solana-sbf-rust-remaining-compute-units" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8550,7 +8797,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-ro-account_modify" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8563,7 +8810,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-ro-modify" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8576,7 +8823,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sanity" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8589,9 +8836,11 @@ dependencies = [ [[package]] name = "solana-sbf-rust-secp256k1-recover" -version = "3.0.0" +version = "3.1.0" dependencies = [ "libsecp256k1 0.7.0", + "sha3", + "solana-hash", "solana-keccak-hasher", "solana-msg", "solana-program-entrypoint", @@ -8600,10 +8849,13 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sha" -version = "3.0.0" +version = "3.1.0" dependencies = [ "blake3", + "sha2 0.10.9", + "sha3", "solana-blake3-hasher", + "solana-hash", "solana-keccak-hasher", "solana-msg", "solana-program-entrypoint", @@ -8612,7 +8864,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sibling-inner-instructions" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8624,7 +8876,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sibling-instructions" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8637,7 +8889,7 @@ dependencies = [ [[package]] name = 
"solana-sbf-rust-simulation" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-clock", @@ -8650,7 +8902,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-spoof1" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-instruction", @@ -8664,7 +8916,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-spoof1-system" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-program-entrypoint", @@ -8674,11 +8926,11 @@ dependencies = [ [[package]] name = "solana-sbf-rust-sysvar" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "solana-account-info", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-instruction", "solana-instructions-sysvar", "solana-msg", @@ -8686,12 +8938,13 @@ dependencies = [ "solana-program-error", "solana-pubkey", "solana-sdk-ids", + "solana-stake-interface", "solana-sysvar", ] [[package]] name = "solana-sbf-rust-upgradeable" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8703,7 +8956,7 @@ dependencies = [ [[package]] name = "solana-sbf-rust-upgraded" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8715,7 +8968,7 @@ dependencies = [ [[package]] name = "solana-sbf-syscall-get-epoch-stake" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-account-info", "solana-msg", @@ -8727,9 +8980,9 @@ dependencies = [ [[package]] name = "solana-sbpf" -version = "0.12.0" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7a3d3cff34df928b804917bf111d3ede779af406703580cd7ed8fb239f5acf" +checksum = "0f224d906c14efc7ed7f42bc5fe9588f3f09db8cabe7f6023adda62a69678e1a" dependencies = [ "byteorder 1.5.0", "combine 3.8.1", @@ -8738,24 +8991,24 @@ dependencies = [ "log", "rand 0.8.5", "rustc-demangle", - "thiserror 2.0.12", + "thiserror 2.0.16", "winapi 0.3.9", ] [[package]] 
name = "solana-sdk-ids" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5d8b9cc68d5c88b062a33e23a6466722467dde0035152d8fb1afbcdf350a5f" +checksum = "b1b6d6aaf60669c592838d382266b173881c65fb1cdec83b37cb8ce7cb89f9ad" dependencies = [ "solana-pubkey", ] [[package]] name = "solana-sdk-macro" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86280da8b99d03560f6ab5aca9de2e38805681df34e0bb8f238e69b29433b9df" +checksum = "d6430000e97083460b71d9fbadc52a2ab2f88f53b3a4c5e58c5ae3640a0e8c00" dependencies = [ "bs58", "proc-macro2", @@ -8765,12 +9018,12 @@ dependencies = [ [[package]] name = "solana-secp256k1-program" -version = "2.2.3" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f19833e4bc21558fe9ec61f239553abe7d05224347b57d65c2218aeeb82d6149" +checksum = "8efa767b0188f577edae7080e8bf080e5db9458e2b6ee5beaa73e2e6bb54e99d" dependencies = [ "digest 0.10.7", - "libsecp256k1 0.6.0", + "k256", "serde", "serde_derive", "sha3", @@ -8779,43 +9032,41 @@ dependencies = [ [[package]] name = "solana-secp256k1-recover" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baa3120b6cdaa270f39444f5093a90a7b03d296d362878f7a6991d6de3bbe496" +checksum = "394a4470477d66296af5217970a905b1c5569032a7732c367fb69e5666c8607e" dependencies = [ - "libsecp256k1 0.6.0", - "solana-define-syscall", - "thiserror 2.0.12", + "k256", + "solana-define-syscall 3.0.0", + "thiserror 2.0.16", ] [[package]] name = "solana-secp256r1-program" -version = "2.2.4" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce0ae46da3071a900f02d367d99b2f3058fe2e90c5062ac50c4f20cfedad8f0f" +checksum = "445d8e12592631d76fc4dc57858bae66c9fd7cc838c306c62a472547fc9d0ce6" dependencies = [ "bytemuck", "openssl", - "solana-feature-set", 
"solana-instruction", - "solana-precompile-error", "solana-sdk-ids", ] [[package]] name = "solana-seed-derivable" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3beb82b5adb266c6ea90e5cf3967235644848eac476c5a1f2f9283a143b7c97f" +checksum = "ff7bdb72758e3bec33ed0e2658a920f1f35dfb9ed576b951d20d63cb61ecd95c" dependencies = [ "solana-derivation-path", ] [[package]] name = "solana-seed-phrase" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36187af2324f079f65a675ec22b31c24919cb4ac22c79472e85d819db9bbbc15" +checksum = "dc905b200a95f2ea9146e43f2a7181e3aeb55de6bc12afb36462d00a3c7310de" dependencies = [ "hmac 0.12.1", "pbkdf2 0.11.0", @@ -8824,7 +9075,7 @@ dependencies = [ [[package]] name = "solana-send-transaction-service" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "crossbeam-channel", @@ -8850,58 +9101,58 @@ dependencies = [ [[package]] name = "solana-serde" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1931484a408af466e14171556a47adaa215953c7f48b24e5f6b0282763818b04" +checksum = "709a93cab694c70f40b279d497639788fc2ccbcf9b4aa32273d4b361322c02dd" dependencies = [ "serde", ] [[package]] name = "solana-serde-varint" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a7e155eba458ecfb0107b98236088c3764a09ddf0201ec29e52a0be40857113" +checksum = "3e5174c57d5ff3c1995f274d17156964664566e2cde18a07bba1586d35a70d3b" dependencies = [ "serde", ] [[package]] name = "solana-serialize-utils" -version = "2.2.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "817a284b63197d2b27afdba829c5ab34231da4a9b4e763466a003c40ca4f535e" +checksum = "56e41dd8feea239516c623a02f0a81c2367f4b604d7965237fed0751aeec33ed" dependencies = [ - "solana-instruction", + 
"solana-instruction-error", "solana-pubkey", "solana-sanitize", ] [[package]] name = "solana-sha256-hasher" -version = "2.3.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa3feb32c28765f6aa1ce8f3feac30936f16c5c3f7eb73d63a5b8f6f8ecdc44" +checksum = "a9b912ba6f71cb202c0c3773ec77bf898fa9fe0c78691a2d6859b3b5b8954719" dependencies = [ "sha2 0.10.9", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-hash", ] [[package]] name = "solana-short-vec" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c54c66f19b9766a56fa0057d060de8378676cb64987533fa088861858fc5a69" +checksum = "b69d029da5428fc1c57f7d49101b2077c61f049d4112cd5fb8456567cc7d2638" dependencies = [ "serde", ] [[package]] name = "solana-shred-version" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afd3db0461089d1ad1a78d9ba3f15b563899ca2386351d38428faa5350c60a98" +checksum = "94953e22ca28fe4541a3447d6baeaf519cc4ddc063253bfa673b721f34c136bb" dependencies = [ "solana-hard-forks", "solana-hash", @@ -8910,11 +9161,11 @@ dependencies = [ [[package]] name = "solana-signature" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c8ec8e657aecfc187522fc67495142c12f35e55ddeca8698edbb738b8dbd8c" +checksum = "4bb8057cc0e9f7b5e89883d49de6f407df655bb6f3a71d0b7baf9986a2218fd9" dependencies = [ - "ed25519-dalek", + "ed25519-dalek 2.2.0", "five8", "serde", "serde-big-array", @@ -8924,20 +9175,31 @@ dependencies = [ [[package]] name = "solana-signer" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c41991508a4b02f021c1342ba00bcfa098630b213726ceadc7cb032e051975b" +checksum = "5bfea97951fee8bae0d6038f39a5efcb6230ecdfe33425ac75196d1a1e3e3235" dependencies = [ "solana-pubkey", "solana-signature", 
"solana-transaction-error", ] +[[package]] +name = "solana-signer-store" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36329bba208f0e41954389ae4ad5d973fe15952672cfd71a9b49deb7d2ecbc2f" +dependencies = [ + "bitvec", + "num-derive", + "num-traits", +] + [[package]] name = "solana-slot-hashes" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8691982114513763e88d04094c9caa0376b867a29577939011331134c301ce" +checksum = "80a293f952293281443c04f4d96afd9d547721923d596e92b4377ed2360f1746" dependencies = [ "serde", "serde_derive", @@ -8948,9 +9210,9 @@ dependencies = [ [[package]] name = "solana-slot-history" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ccc1b2067ca22754d5283afb2b0126d61eae734fc616d23871b0943b0d935e" +checksum = "f914f6b108f5bba14a280b458d023e3621c9973f27f015a4d755b50e88d89e97" dependencies = [ "bv", "serde", @@ -8961,9 +9223,9 @@ dependencies = [ [[package]] name = "solana-stable-layout" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f14f7d02af8f2bc1b5efeeae71bc1c2b7f0f65cd75bcc7d8180f2c762a57f54" +checksum = "1da74507795b6e8fb60b7c7306c0c36e2c315805d16eaaf479452661234685ac" dependencies = [ "solana-instruction", "solana-pubkey", @@ -8971,55 +9233,47 @@ dependencies = [ [[package]] name = "solana-stake-interface" -version = "1.2.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5269e89fde216b4d7e1d1739cf5303f8398a1ff372a81232abbee80e554a838c" +checksum = "f6f912ae679b683365348dea482dbd9468d22ff258b554fd36e3d3683c2122e3" dependencies = [ - "borsh 0.10.3", - "borsh 1.5.7", "num-traits", "serde", "serde_derive", "solana-clock", "solana-cpi", - "solana-decode-error", "solana-instruction", "solana-program-error", "solana-pubkey", "solana-system-interface", 
+ "solana-sysvar", "solana-sysvar-id", ] [[package]] name = "solana-stake-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bincode", "log", "solana-account", - "solana-bincode", "solana-clock", - "solana-config-program-client", + "solana-config-interface", "solana-genesis-config", - "solana-instruction", - "solana-log-collector", "solana-native-token", - "solana-packet", - "solana-program-runtime", "solana-pubkey", "solana-rent", "solana-sdk-ids", "solana-stake-interface", "solana-sysvar", "solana-transaction-context", - "solana-type-overrides", "solana-vote-interface", ] [[package]] name = "solana-storage-bigtable" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-reserved-account-keys", "backoff", @@ -9051,7 +9305,7 @@ dependencies = [ "solana-transaction", "solana-transaction-error", "solana-transaction-status", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tonic", "zstd", @@ -9059,7 +9313,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "bs58", @@ -9082,10 +9336,9 @@ dependencies = [ [[package]] name = "solana-streamer" -version = "3.0.0" +version = "3.1.0" dependencies = [ "arc-swap", - "async-channel", "bytes", "crossbeam-channel", "dashmap", @@ -9093,7 +9346,7 @@ dependencies = [ "futures-util", "governor", "histogram", - "indexmap 2.10.0", + "indexmap 2.11.4", "itertools 0.12.1", "libc", "log", @@ -9104,7 +9357,7 @@ dependencies = [ "quinn", "quinn-proto", "rand 0.8.5", - "rustls 0.23.31", + "rustls 0.23.32", "smallvec", "socket2 0.6.0", "solana-keypair", @@ -9121,7 +9374,7 @@ dependencies = [ "solana-tls-utils", "solana-transaction-error", "solana-transaction-metrics-tracker", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-util 0.7.16", "x509-parser", @@ -9129,10 +9382,9 @@ dependencies = [ [[package]] name = "solana-svm" -version = "3.0.0" +version = "3.1.0" dependencies = [ "ahash 0.8.11", - "itertools 
0.12.1", "log", "percentage", "qualifier_attr", @@ -9144,11 +9396,9 @@ dependencies = [ "solana-hash", "solana-instruction", "solana-instructions-sysvar", - "solana-loader-v3-interface 5.0.0", + "solana-loader-v3-interface", "solana-loader-v4-interface", "solana-loader-v4-program", - "solana-log-collector", - "solana-measure", "solana-message", "solana-nonce", "solana-nonce-account", @@ -9157,38 +9407,59 @@ dependencies = [ "solana-program-runtime", "solana-pubkey", "solana-rent", - "solana-rent-collector", "solana-sdk-ids", - "solana-slot-hashes", "solana-svm-callback", "solana-svm-feature-set", + "solana-svm-log-collector", + "solana-svm-measure", + "solana-svm-timings", "solana-svm-transaction", + "solana-svm-type-overrides", "solana-system-interface", "solana-sysvar-id", - "solana-timings", "solana-transaction-context", "solana-transaction-error", - "solana-type-overrides", "spl-generic-token", - "thiserror 2.0.12", + "thiserror 2.0.16", +] + +[[package]] +name = "solana-svm-callback" +version = "3.1.0" +dependencies = [ + "solana-account", + "solana-clock", + "solana-precompile-error", + "solana-pubkey", +] + +[[package]] +name = "solana-svm-feature-set" +version = "3.1.0" + +[[package]] +name = "solana-svm-log-collector" +version = "3.1.0" +dependencies = [ + "log", ] [[package]] -name = "solana-svm-callback" -version = "3.0.0" +name = "solana-svm-measure" +version = "3.1.0" + +[[package]] +name = "solana-svm-timings" +version = "3.1.0" dependencies = [ - "solana-account", - "solana-precompile-error", + "eager", + "enum-iterator", "solana-pubkey", ] -[[package]] -name = "solana-svm-feature-set" -version = "3.0.0" - [[package]] name = "solana-svm-transaction" -version = "3.0.0" +version = "3.1.0" dependencies = [ "solana-hash", "solana-message", @@ -9198,25 +9469,31 @@ dependencies = [ "solana-transaction", ] +[[package]] +name = "solana-svm-type-overrides" +version = "3.1.0" +dependencies = [ + "rand 0.8.5", +] + [[package]] name = "solana-system-interface" 
-version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7c18cb1a91c6be5f5a8ac9276a1d7c737e39a21beba9ea710ab4b9c63bc90" +checksum = "4e1790547bfc3061f1ee68ea9d8dc6c973c02a163697b24263a8e9f2e6d4afa2" dependencies = [ - "js-sys", "num-traits", "serde", "serde_derive", - "solana-decode-error", "solana-instruction", + "solana-msg", + "solana-program-error", "solana-pubkey", - "wasm-bindgen", ] [[package]] name = "solana-system-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "log", @@ -9226,24 +9503,24 @@ dependencies = [ "solana-bincode", "solana-fee-calculator", "solana-instruction", - "solana-log-collector", "solana-nonce", "solana-nonce-account", "solana-packet", "solana-program-runtime", "solana-pubkey", "solana-sdk-ids", + "solana-svm-log-collector", + "solana-svm-type-overrides", "solana-system-interface", "solana-sysvar", "solana-transaction-context", - "solana-type-overrides", ] [[package]] name = "solana-system-transaction" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd98a25e5bcba8b6be8bcbb7b84b24c2a6a8178d7fb0e3077a916855ceba91a" +checksum = "a31b5699ec533621515e714f1533ee6b3b0e71c463301d919eb59b8c1e249d30" dependencies = [ "solana-hash", "solana-keypair", @@ -9256,9 +9533,9 @@ dependencies = [ [[package]] name = "solana-sysvar" -version = "2.2.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50c92bc019c590f5e42c61939676e18d14809ed00b2a59695dd5c67ae72c097" +checksum = "63205e68d680bcc315337dec311b616ab32fea0a612db3b883ce4de02e0953f9" dependencies = [ "base64 0.22.1", "bincode", @@ -9269,33 +9546,30 @@ dependencies = [ "serde_derive", "solana-account-info", "solana-clock", - "solana-define-syscall", + "solana-define-syscall 3.0.0", "solana-epoch-rewards", "solana-epoch-schedule", "solana-fee-calculator", "solana-hash", "solana-instruction", - 
"solana-instructions-sysvar", "solana-last-restart-slot", "solana-program-entrypoint", "solana-program-error", "solana-program-memory", "solana-pubkey", "solana-rent", - "solana-sanitize", "solana-sdk-ids", "solana-sdk-macro", "solana-slot-hashes", "solana-slot-history", - "solana-stake-interface", "solana-sysvar-id", ] [[package]] name = "solana-sysvar-id" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5762b273d3325b047cfda250787f8d796d781746860d5d0a746ee29f3e8812c1" +checksum = "5051bc1a16d5d96a96bc33b5b2ec707495c48fe978097bdaba68d3c47987eb32" dependencies = [ "solana-pubkey", "solana-sdk-ids", @@ -9303,7 +9577,7 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "base64 0.22.1", @@ -9329,11 +9603,12 @@ dependencies = [ "solana-instruction", "solana-keypair", "solana-ledger", - "solana-loader-v3-interface 5.0.0", + "solana-loader-v3-interface", "solana-logger", "solana-message", "solana-native-token", "solana-net-utils", + "solana-program-binaries", "solana-program-test", "solana-pubkey", "solana-rent", @@ -9352,24 +9627,15 @@ dependencies = [ [[package]] name = "solana-time-utils" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6af261afb0e8c39252a04d026e3ea9c405342b08c871a2ad8aa5448e068c784c" - -[[package]] -name = "solana-timings" version = "3.0.0" -dependencies = [ - "eager", - "enum-iterator", - "solana-pubkey", -] +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ced92c60aa76ec4780a9d93f3bd64dfa916e1b998eacc6f1c110f3f444f02c9" [[package]] name = "solana-tls-utils" -version = "3.0.0" +version = "3.1.0" dependencies = [ - "rustls 0.23.31", + "rustls 0.23.32", "solana-keypair", "solana-pubkey", "solana-signer", @@ -9378,12 +9644,12 @@ dependencies = [ [[package]] name = "solana-tpu-client" -version = "3.0.0" +version = "3.1.0" 
dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.10.0", + "indexmap 2.11.4", "indicatif 0.18.0", "log", "rayon", @@ -9404,19 +9670,19 @@ dependencies = [ "solana-signer", "solana-transaction", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-tpu-client-next" -version = "3.0.0" +version = "3.1.0" dependencies = [ "async-trait", "log", "lru", "quinn", - "rustls 0.23.31", + "rustls 0.23.32", "solana-clock", "solana-connection-cache", "solana-keypair", @@ -9428,41 +9694,39 @@ dependencies = [ "solana-time-utils", "solana-tls-utils", "solana-tpu-client", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", "tokio-util 0.7.16", ] [[package]] name = "solana-transaction" -version = "2.2.3" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80657d6088f721148f5d889c828ca60c7daeedac9a8679f9ec215e0c42bcbf41" +checksum = "64928e6af3058dcddd6da6680cbe08324b4e071ad73115738235bbaa9e9f72a5" dependencies = [ "bincode", "serde", "serde_derive", - "solana-bincode", + "solana-address", "solana-hash", "solana-instruction", - "solana-keypair", + "solana-instruction-error", "solana-message", - "solana-pubkey", "solana-sanitize", "solana-sdk-ids", "solana-short-vec", "solana-signature", "solana-signer", - "solana-system-interface", "solana-transaction-error", - "wasm-bindgen", ] [[package]] name = "solana-transaction-context" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", + "qualifier_attr", "serde", "serde_derive", "solana-account", @@ -9476,19 +9740,19 @@ dependencies = [ [[package]] name = "solana-transaction-error" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a9dc8fdb61c6088baab34fc3a8b8473a03a7a5fd404ed8dd502fa79b67cb1" +checksum = "4222065402340d7e6aec9dc3e54d22992ddcf923d91edcd815443c2bfca3144a" dependencies = [ "serde", "serde_derive", - "solana-instruction", + 
"solana-instruction-error", "solana-sanitize", ] [[package]] name = "solana-transaction-metrics-tracker" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "bincode", @@ -9502,13 +9766,13 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "3.0.0" +version = "3.1.0" dependencies = [ "Inflector", "agave-reserved-account-keys", "base64 0.22.1", "bincode", - "borsh 1.5.7", + "borsh", "bs58", "log", "serde", @@ -9520,7 +9784,7 @@ dependencies = [ "solana-hash", "solana-instruction", "solana-loader-v2-interface", - "solana-loader-v3-interface 5.0.0", + "solana-loader-v3-interface", "solana-message", "solana-program-option", "solana-pubkey", @@ -9539,12 +9803,12 @@ dependencies = [ "spl-token-group-interface", "spl-token-interface", "spl-token-metadata-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-transaction-status-client-types" -version = "3.0.0" +version = "3.1.0" dependencies = [ "base64 0.22.1", "bincode", @@ -9562,12 +9826,12 @@ dependencies = [ "solana-transaction", "solana-transaction-context", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-turbine" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "agave-xdp", @@ -9584,7 +9848,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rayon", - "rustls 0.23.31", + "rustls 0.23.32", "solana-clock", "solana-cluster-type", "solana-entry", @@ -9596,6 +9860,7 @@ dependencies = [ "solana-metrics", "solana-native-token", "solana-net-utils", + "solana-nohash-hasher", "solana-perf", "solana-poh", "solana-pubkey", @@ -9612,20 +9877,13 @@ dependencies = [ "solana-tls-utils", "solana-transaction-error", "static_assertions", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] -[[package]] -name = "solana-type-overrides" -version = "3.0.0" -dependencies = [ - "rand 0.8.5", -] - [[package]] name = "solana-udp-client" -version = "3.0.0" +version = "3.1.0" 
dependencies = [ "async-trait", "solana-connection-cache", @@ -9633,13 +9891,13 @@ dependencies = [ "solana-net-utils", "solana-streamer", "solana-transaction-error", - "thiserror 2.0.12", + "thiserror 2.0.16", "tokio", ] [[package]] name = "solana-unified-scheduler-logic" -version = "3.0.0" +version = "3.1.0" dependencies = [ "assert_matches", "solana-pubkey", @@ -9651,7 +9909,7 @@ dependencies = [ [[package]] name = "solana-unified-scheduler-pool" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-banking-stage-ingress-types", "aquamarine", @@ -9673,7 +9931,7 @@ dependencies = [ "solana-runtime", "solana-runtime-transaction", "solana-svm", - "solana-timings", + "solana-svm-timings", "solana-transaction", "solana-transaction-error", "solana-unified-scheduler-logic", @@ -9685,13 +9943,13 @@ dependencies = [ [[package]] name = "solana-validator-exit" -version = "2.2.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bbf6d7a3c0b28dd5335c52c0e9eae49d0ae489a8f324917faf0ded65a812c1d" +checksum = "c5d2face763df5afeaa9509b9019968860e69cc1531ec8b4a2e6c7b702204d5a" [[package]] name = "solana-version" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "rand 0.8.5", @@ -9704,7 +9962,7 @@ dependencies = [ [[package]] name = "solana-vote" -version = "3.0.0" +version = "3.1.0" dependencies = [ "bincode", "itertools 0.12.1", @@ -9727,24 +9985,26 @@ dependencies = [ "solana-svm-transaction", "solana-transaction", "solana-vote-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "solana-vote-interface" -version = "2.2.6" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b80d57478d6599d30acc31cc5ae7f93ec2361a06aefe8ea79bc81739a08af4c3" +checksum = "c33f1a30b1e61944e52afef0992a2be93720c5770eaf1f6d8e6e34f87d90e754" dependencies = [ "bincode", + "cfg_eval", "num-derive", "num-traits", "serde", "serde_derive", + "serde_with", 
"solana-clock", - "solana-decode-error", "solana-hash", "solana-instruction", + "solana-instruction-error", "solana-pubkey", "solana-rent", "solana-sdk-ids", @@ -9756,7 +10016,7 @@ dependencies = [ [[package]] name = "solana-vote-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bincode", @@ -9783,12 +10043,23 @@ dependencies = [ "solana-transaction", "solana-transaction-context", "solana-vote-interface", - "thiserror 2.0.12", + "thiserror 2.0.16", +] + +[[package]] +name = "solana-votor-messages" +version = "3.1.0" +dependencies = [ + "serde", + "solana-bls-signatures", + "solana-clock", + "solana-hash", + "solana-logger", ] [[package]] name = "solana-wen-restart" -version = "3.0.0" +version = "3.1.0" dependencies = [ "anyhow", "log", @@ -9805,8 +10076,8 @@ dependencies = [ "solana-pubkey", "solana-runtime", "solana-shred-version", + "solana-svm-timings", "solana-time-utils", - "solana-timings", "solana-vote", "solana-vote-interface", "solana-vote-program", @@ -9814,60 +10085,24 @@ dependencies = [ [[package]] name = "solana-zk-elgamal-proof-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bytemuck", "num-derive", "num-traits", "solana-instruction", - "solana-log-collector", "solana-program-runtime", "solana-sdk-ids", - "solana-zk-sdk 3.0.0", -] - -[[package]] -name = "solana-zk-sdk" -version = "2.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05857892ac50fe03c125d8445fd790c6768015b76f4ad1e4b4b1499938b357f0" -dependencies = [ - "aes-gcm-siv", - "base64 0.22.1", - "bincode", - "bytemuck", - "bytemuck_derive", - "curve25519-dalek 4.1.3", - "itertools 0.12.1", - "js-sys", - "merlin", - "num-derive", - "num-traits", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "sha3", - "solana-derivation-path", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-seed-derivable", - "solana-seed-phrase", - "solana-signature", - 
"solana-signer", - "subtle", - "thiserror 2.0.12", - "wasm-bindgen", - "zeroize", + "solana-svm-log-collector", + "solana-zk-sdk", ] [[package]] name = "solana-zk-sdk" -version = "3.0.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dffbd0b7537f4249d69b74c632f8eac1d2726572022791f9ead65a67d3f6905" +checksum = "9602bcb1f7af15caef92b91132ec2347e1c51a72ecdbefdaefa3eac4b8711475" dependencies = [ "aes-gcm-siv", "base64 0.22.1", @@ -9875,6 +10110,7 @@ dependencies = [ "bytemuck", "bytemuck_derive", "curve25519-dalek 4.1.3", + "getrandom 0.2.10", "itertools 0.12.1", "js-sys", "merlin", @@ -9894,29 +10130,29 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.12", + "thiserror 2.0.16", "wasm-bindgen", "zeroize", ] [[package]] name = "solana-zk-token-proof-program" -version = "3.0.0" +version = "3.1.0" dependencies = [ "agave-feature-set", "bytemuck", "num-derive", "num-traits", "solana-instruction", - "solana-log-collector", "solana-program-runtime", "solana-sdk-ids", + "solana-svm-log-collector", "solana-zk-token-sdk", ] [[package]] name = "solana-zk-token-sdk" -version = "3.0.0" +version = "3.1.0" dependencies = [ "aes-gcm-siv", "base64 0.22.1", @@ -9933,7 +10169,7 @@ dependencies = [ "serde_derive", "serde_json", "sha3", - "solana-curve25519 3.0.0", + "solana-curve25519 3.1.0", "solana-derivation-path", "solana-instruction", "solana-pubkey", @@ -9943,7 +10179,7 @@ dependencies = [ "solana-signature", "solana-signer", "subtle", - "thiserror 2.0.12", + "thiserror 2.0.16", "zeroize", ] @@ -9962,22 +10198,32 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "spl-associated-token-account-interface" -version = "1.0.0" +version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6bbe0794e532ac08428d3abf5bf8ae75bd81dfddd785c388e326c00c92c6f5" +checksum = "e6433917b60441d68d99a17e121d9db0ea15a9a69c0e5afa34649cf5ba12612f" dependencies = [ - "borsh 1.5.7", + "borsh", "solana-instruction", "solana-pubkey", ] [[package]] name = "spl-discriminator" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a20542d4c8264856d205c0090512f374dbf7b3124479a3d93ab6184ae3631aa" +checksum = "d48cc11459e265d5b501534144266620289720b4c44522a47bc6b63cd295d2f3" dependencies = [ "bytemuck", "solana-program-error", @@ -10011,9 +10257,9 @@ dependencies = [ [[package]] name = "spl-generic-token" -version = "1.0.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "741a62a566d97c58d33f9ed32337ceedd4e35109a686e31b1866c5dfa56abddc" +checksum = "233df81b75ab99b42f002b5cdd6e65a7505ffa930624f7096a7580a56765e9cf" dependencies = [ "bytemuck", "solana-pubkey", @@ -10021,9 +10267,9 @@ dependencies = [ [[package]] name = "spl-memo-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24af0730130fea732616be9425fe8eb77782e2aab2f0e76837b6a66aaba96c6b" +checksum = "3d4e2aedd58f858337fa609af5ad7100d4a243fdaf6a40d6eb4c28c5f19505d3" dependencies = [ "solana-instruction", "solana-pubkey", @@ -10031,29 +10277,28 @@ dependencies = [ [[package]] name = "spl-pod" -version = "0.5.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d994afaf86b779104b4a95ba9ca75b8ced3fdb17ee934e38cb69e72afbe17799" +checksum = "b1233fdecd7461611d69bb87bc2e95af742df47291975d21232a0be8217da9de" dependencies = [ - "borsh 1.5.7", + "borsh", "bytemuck", "bytemuck_derive", "num-derive", "num-traits", - "solana-decode-error", - "solana-msg", + "num_enum", "solana-program-error", "solana-program-option", "solana-pubkey", - 
"solana-zk-sdk 2.3.6", - "thiserror 2.0.12", + "solana-zk-sdk", + "thiserror 2.0.16", ] [[package]] name = "spl-token-2022-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d7ae2ee6b856f8ddcbdc3b3a9f4d2141582bbe150f93e5298ee97e0251fa04" +checksum = "0888304af6b3d839e435712e6c84025e09513017425ff62045b6b8c41feb77d9" dependencies = [ "arrayref", "bytemuck", @@ -10061,79 +10306,76 @@ dependencies = [ "num-traits", "num_enum", "solana-account-info", - "solana-decode-error", "solana-instruction", - "solana-msg", "solana-program-error", "solana-program-option", "solana-program-pack", "solana-pubkey", "solana-sdk-ids", - "solana-zk-sdk 2.3.6", + "solana-zk-sdk", "spl-pod", "spl-token-confidential-transfer-proof-extraction", "spl-token-confidential-transfer-proof-generation", "spl-token-group-interface", "spl-token-metadata-interface", "spl-type-length-value", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-token-confidential-transfer-proof-extraction" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bedc4675c80409a004da46978674e4073c65c4b1c611bf33d120381edeffe036" +checksum = "7a22217af69b7a61ca813f47c018afb0b00b02a74a4c70ff099cd4287740bc3d" dependencies = [ "bytemuck", "solana-account-info", - "solana-curve25519 2.2.15", + "solana-curve25519 2.3.7", "solana-instruction", "solana-instructions-sysvar", "solana-msg", "solana-program-error", "solana-pubkey", "solana-sdk-ids", - "solana-zk-sdk 2.3.6", + "solana-zk-sdk", "spl-pod", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-token-confidential-transfer-proof-generation" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae5b124840d4aed474cef101d946a798b806b46a509ee4df91021e1ab1cef3ef" +checksum = "f63a2b41095945dc15274b924b21ccae9b3ec9dc2fdd43dbc08de8c33bbcd915" 
dependencies = [ "curve25519-dalek 4.1.3", - "solana-zk-sdk 2.3.6", - "thiserror 2.0.12", + "solana-zk-sdk", + "thiserror 2.0.16", ] [[package]] name = "spl-token-group-interface" -version = "0.6.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5597b4cd76f85ce7cd206045b7dc22da8c25516573d42d267c8d1fd128db5129" +checksum = "452d0f758af20caaa10d9a6f7608232e000d4c74462f248540b3d2ddfa419776" dependencies = [ "bytemuck", "num-derive", "num-traits", - "solana-decode-error", + "num_enum", "solana-instruction", - "solana-msg", "solana-program-error", "solana-pubkey", "spl-discriminator", "spl-pod", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-token-interface" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e0c2d4e38ef5834cf7fb1b592b8a8c6eab8485f5ac7a04a151b502c63a0aaa" +checksum = "8c564ac05a7c8d8b12e988a37d82695b5ba4db376d07ea98bc4882c81f96c7f3" dependencies = [ "arrayref", "bytemuck", @@ -10146,46 +10388,44 @@ dependencies = [ "solana-program-pack", "solana-pubkey", "solana-sdk-ids", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-token-metadata-interface" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "304d6e06f0de0c13a621464b1fd5d4b1bebf60d15ca71a44d3839958e0da16ee" +checksum = "9c467c7c3bd056f8fe60119e7ec34ddd6f23052c2fa8f1f51999098063b72676" dependencies = [ - "borsh 1.5.7", + "borsh", "num-derive", "num-traits", "solana-borsh", - "solana-decode-error", "solana-instruction", - "solana-msg", "solana-program-error", "solana-pubkey", "spl-discriminator", "spl-pod", "spl-type-length-value", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] name = "spl-type-length-value" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d417eb548214fa822d93f84444024b4e57c13ed6719d4dcc68eec24fb481e9f5" +checksum = "ca20a1a19f4507a98ca4b28ff5ed54cac9b9d34ed27863e2bde50a3238f9a6ac" dependencies = [ "bytemuck", "num-derive", "num-traits", + "num_enum", "solana-account-info", - "solana-decode-error", "solana-msg", "solana-program-error", "spl-discriminator", "spl-pod", - "thiserror 2.0.12", + "thiserror 2.0.16", ] [[package]] @@ -10219,9 +10459,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -10361,6 +10601,12 @@ dependencies = [ "libc", ] +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tar" version = "0.4.44" @@ -10409,31 +10655,55 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.20.0" +version = "3.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "84fa4d11fadde498443cca10fd3ac23c951f0dc59e080e9f4b93d4df4e4eea53" dependencies = [ "fastrand", "getrandom 0.3.1", "once_cell", "rustix 1.0.2", - "windows-sys 0.59.0", + "windows-sys 0.61.0", ] [[package]] -name = "termcolor" -version = "1.1.3" +name = "termtree" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" + +[[package]] +name = "test-case" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" 
+checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" dependencies = [ - "winapi-util", + "test-case-macros", ] [[package]] -name = "termtree" -version = "0.4.1" +name = "test-case-core" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" +checksum = "adcb7fd841cd518e279be3d5a3eb0636409487998a4aff22f3de87b81e88384f" +dependencies = [ + "cfg-if 1.0.3", + "proc-macro2", + "quote", + "syn 2.0.87", +] + +[[package]] +name = "test-case-macros" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "test-case-core", +] [[package]] name = "textwrap" @@ -10455,11 +10725,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.16", ] [[package]] @@ -10475,9 +10745,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" dependencies = [ "proc-macro2", "quote", @@ -10490,10 +10760,19 @@ version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "once_cell", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "tikv-jemalloc-sys" version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" @@ -10656,7 +10935,7 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.31", + "rustls 0.23.32", "tokio", ] @@ -10752,7 +11031,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.10.0", + "indexmap 2.11.4", "toml_datetime", "winnow", ] @@ -10775,7 +11054,7 @@ dependencies = [ "http-body 0.4.5", "hyper 0.14.32", "hyper-timeout", - "percent-encoding 2.3.1", + "percent-encoding 2.3.2", "pin-project", "prost", "rustls-pemfile", @@ -10842,7 +11121,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", "bytes", "futures-util", "http 1.2.0", @@ -10868,11 +11147,10 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ - "cfg-if 1.0.0", "log", "pin-project-lite", "tracing-attributes", @@ -10881,9 +11159,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", @@ -10892,9 +11170,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -10915,9 +11193,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "sharded-slab", "thread_local", @@ -10963,7 +11241,7 @@ dependencies = [ "rustls 0.21.12", "sha1", "thiserror 1.0.69", - "url 2.5.4", + "url 2.5.7", "utf-8", "webpki-roots 0.24.0", ] @@ -11100,13 +11378,14 @@ dependencies = [ [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", - "idna 1.0.3", - "percent-encoding 2.3.1", + "idna 1.1.0", + "percent-encoding 2.3.2", + "serde", ] [[package]] @@ -11127,6 +11406,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + [[package]] name = 
"valuable" version = "0.1.0" @@ -11207,21 +11492,22 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "ab10a69fbd0a177f5f649ad4d8d3305499c42bab9aef2f7ff592d0ec8f833819" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "once_cell", "rustversion", "wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "0bb702423545a6007bbc368fde243ba47ca275e549c8a28617f56f6ba53b1d1c" dependencies = [ "bumpalo", "log", @@ -11237,7 +11523,7 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73157efb9af26fb564bb59a009afd1c7c334a44db171d280690d0c3faaec3468" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "js-sys", "wasm-bindgen", "web-sys", @@ -11245,9 +11531,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "fc65f4f411d91494355917b605e1480033152658d71f722a90647f56a70c88a0" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -11255,9 +11541,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "ffc003a991398a8ee604a401e194b6b3a39677b3173d6e74495eb51b82e99a32" dependencies = [ "proc-macro2", "quote", @@ -11268,9 +11554,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" 
-version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "293c37f4efa430ca14db3721dfbe48d8c33308096bd44d80ebaa775ab71ba1cf" dependencies = [ "unicode-ident", ] @@ -11297,9 +11583,9 @@ dependencies = [ [[package]] name = "webpki-root-certs" -version = "0.26.6" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c6dfa3ac045bc517de14c7b1384298de1dbd229d38e08e169d9ae8c170937c" +checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" dependencies = [ "rustls-pki-types", ] @@ -11348,16 +11634,6 @@ dependencies = [ "libc", ] -[[package]] -name = "wide" -version = "0.7.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" -dependencies = [ - "bytemuck", - "safe_arch", -] - [[package]] name = "winapi" version = "0.2.8" @@ -11403,9 +11679,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-link" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" [[package]] name = "windows-sys" @@ -11452,6 +11728,15 @@ dependencies = [ "windows-targets 0.53.2", ] +[[package]] +name = "windows-sys" +version = "0.61.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -11709,7 +11994,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 1.0.3", "windows-sys 0.48.0", ] @@ -11719,7 +12004,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.9.4", ] [[package]] @@ -11734,6 +12019,15 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "x509-parser" version = "0.14.0" diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 1e56c3032d233a..567051ad96bd77 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -1,12 +1,11 @@ - [package] name = "solana-sbf-programs" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana" readme = "README.md" publish = false version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -53,6 +52,7 @@ members = [ "rust/param_passing", "rust/param_passing_dep", "rust/poseidon", + "rust/r2_instruction_data_pointer", "rust/rand", "rust/realloc", "rust/realloc_invoke", @@ -73,7 +73,7 @@ members = [ "rust/upgraded", ] [workspace.package] -version = "3.0.0" +version = "3.1.0" description = "Solana SBF test program written in Rust" authors = ["Anza Maintainers "] repository = "https://github.com/anza-xyz/agave" @@ -90,10 +90,10 @@ check-cfg = [ ] [workspace.dependencies] -agave-feature-set = { path = "../../feature-set", version = "=3.0.0" } -agave-reserved-account-keys 
= { path = "../../reserved-account-keys", version = "=3.0.0" } -agave-syscalls = { path = "../../syscalls", version = "=3.0.0" } -agave-validator = { path = "../../validator", version = "=3.0.0" } +agave-feature-set = { path = "../../feature-set", version = "=3.1.0" } +agave-reserved-account-keys = { path = "../../reserved-account-keys", version = "=3.1.0" } +agave-syscalls = { path = "../../syscalls", version = "=3.1.0" } +agave-validator = { path = "../../validator", version = "=3.1.0" } array-bytes = "=1.4.1" bincode = { version = "1.1.4", default-features = false } blake3 = "1.0.0" @@ -112,63 +112,68 @@ rand = "0.8" serde = "1.0.112" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_derive = "1.0.112" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.56" -solana-account-decoder = { path = "../../account-decoder", version = "=3.0.0" } -solana-account-info = "=2.3.0" -solana-accounts-db = { path = "../../accounts-db", version = "=3.0.0" } -solana-big-mod-exp = "=2.2.1" -solana-blake3-hasher = { version = "=2.2.1", features = ["blake3"] } -solana-bn254 = "=2.2.2" -solana-bpf-loader-program = { path = "../bpf_loader", version = "=3.0.0" } -solana-cli-output = { path = "../../cli-output", version = "=3.0.0" } -solana-clock = { version = "=2.2.2", features = ["serde", "sysvar"] } -solana-compute-budget = { path = "../../compute-budget", version = "=3.0.0" } -solana-compute-budget-instruction = { path = "../../compute-budget-instruction", version = "=3.0.0" } -solana-curve25519 = { path = "../../curves/curve25519", version = "=3.0.0" } -solana-define-syscall = "=2.3.0" -solana-fee = { path = "../../fee", version = "=3.0.0" } -solana-hash = { version = "=2.3.0", features = ["bytemuck", "serde", "std"] } -solana-instruction = "=2.3.0" -solana-instructions-sysvar = "=2.2.2" -solana-keccak-hasher = { version = "=2.2.1", features = 
["sha3"] } -solana-ledger = { path = "../../ledger", version = "=3.0.0" } -solana-log-collector = { path = "../../log-collector", version = "=3.0.0" } -solana-logger = "=2.3.1" -solana-measure = { path = "../../measure", version = "=3.0.0" } -solana-msg = "=2.2.1" -solana-poseidon = { path = "../../poseidon/", version = "=3.0.0" } -solana-program = "=2.2.1" -solana-program-entrypoint = "=2.3.0" -solana-program-error = "=2.2.2" -solana-program-memory = "=2.2.1" -solana-program-runtime = { path = "../../program-runtime", version = "=3.0.0" } -solana-pubkey = { version = "=2.4.0", default-features = false } -solana-runtime = { path = "../../runtime", version = "=3.0.0" } -solana-runtime-transaction = { path = "../../runtime-transaction", version = "=3.0.0" } -solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=3.0.0" } -solana-sbf-rust-invoke-dep = { path = "rust/invoke_dep", version = "=3.0.0" } -solana-sbf-rust-invoked-dep = { path = "rust/invoked_dep", version = "=3.0.0" } -solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", version = "=3.0.0" } -solana-sbf-rust-mem-dep = { path = "rust/mem_dep", version = "=3.0.0" } -solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=3.0.0" } -solana-sbf-rust-realloc-dep = { path = "rust/realloc_dep", version = "=3.0.0" } -solana-sbf-rust-realloc-invoke-dep = { path = "rust/realloc_invoke_dep", version = "=3.0.0" } -solana-sbpf = "=0.12.0" -solana-sdk-ids = "=2.2.1" -solana-secp256k1-recover = "=2.2.1" -solana-sha256-hasher = { version = "=2.3.0", features = ["sha2"] } -solana-stake-interface = { version = "=1.2.1", features = ["bincode"] } -solana-svm = { path = "../../svm", version = "=3.0.0" } -solana-svm-callback = { path = "../../svm-callback", version = "=3.0.0" } -solana-svm-feature-set = { path = "../../svm-feature-set", version = "=3.0.0" } -solana-svm-transaction = { path = "../../svm-transaction", version = "=3.0.0" } -solana-system-interface = { version = 
"=1.0", features = ["bincode"] } -solana-sysvar = "=2.2.2" -solana-timings = { path = "../../timings", version = "=3.0.0" } -solana-transaction-context = { path = "../../transaction-context", version = "=3.0.0" } -solana-transaction-status = { path = "../../transaction-status", version = "=3.0.0" } -solana-type-overrides = { path = "../../type-overrides", version = "=3.0.0" } -solana-vote = { path = "../../vote", version = "=3.0.0" } -solana-vote-program = { path = "../../programs/vote", version = "=3.0.0" } +sha2 = "0.10.8" +sha3 = "0.10.8" +solana-account-decoder = { path = "../../account-decoder", version = "=3.1.0" } +solana-account-info = "=3.0.0" +solana-accounts-db = { path = "../../accounts-db", version = "=3.1.0" } +solana-big-mod-exp = "=3.0.0" +solana-blake3-hasher = { version = "=3.0.0", features = ["blake3"] } +solana-bn254 = "=3.0.0" +solana-bpf-loader-program = { path = "../bpf_loader", version = "=3.1.0" } +solana-cli-output = { path = "../../cli-output", version = "=3.1.0" } +solana-clock = { version = "=3.0.0", features = ["serde", "sysvar"] } +solana-compute-budget = { path = "../../compute-budget", version = "=3.1.0" } +solana-compute-budget-instruction = { path = "../../compute-budget-instruction", version = "=3.1.0" } +solana-cpi = "=3.0.0" +solana-curve25519 = { path = "../../curves/curve25519", version = "=3.1.0" } +solana-define-syscall = "=3.0.0" +solana-fee = { path = "../../fee", version = "=3.1.0" } +solana-hash = { version = "=3.0.0", features = ["bytemuck", "serde", "std"] } +solana-instruction = "=3.0.0" +solana-instructions-sysvar = "=3.0.0" +solana-keccak-hasher = { version = "=3.0.0", features = ["sha3"] } +solana-ledger = { path = "../../ledger", version = "=3.1.0" } +solana-logger = "=3.0.0" +solana-measure = { path = "../../measure", version = "=3.1.0" } +solana-msg = "=3.0.0" +solana-poseidon = { path = "../../poseidon/", version = "=3.1.0" } +solana-program = "=3.0.0" +solana-program-entrypoint = "=3.1.0" 
+solana-program-error = "=3.0.0" +solana-program-memory = "=3.0.0" +solana-program-runtime = { path = "../../program-runtime", version = "=3.1.0" } +solana-pubkey = { version = "=3.0.0", default-features = false } +solana-runtime = { path = "../../runtime", version = "=3.1.0" } +solana-runtime-transaction = { path = "../../runtime-transaction", version = "=3.1.0" } +solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=3.1.0" } +solana-sbf-rust-invoke-dep = { path = "rust/invoke_dep", version = "=3.1.0" } +solana-sbf-rust-invoked-dep = { path = "rust/invoked_dep", version = "=3.1.0" } +solana-sbf-rust-many-args-dep = { path = "rust/many_args_dep", version = "=3.1.0" } +solana-sbf-rust-mem-dep = { path = "rust/mem_dep", version = "=3.1.0" } +solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = "=3.1.0" } +solana-sbf-rust-r2-instruction-data-pointer = { path = "rust/r2_instruction_data_pointer", version = "=3.1.0" } +solana-sbf-rust-realloc-dep = { path = "rust/realloc_dep", version = "=3.1.0" } +solana-sbf-rust-realloc-invoke-dep = { path = "rust/realloc_invoke_dep", version = "=3.1.0" } +solana-sbpf = "=0.12.2" +solana-sdk-ids = "=3.0.0" +solana-secp256k1-recover = "=3.0.0" +solana-sha256-hasher = { version = "=3.0.0", features = ["sha2"] } +solana-stake-interface = { version = "=2.0.1", features = ["bincode"] } +solana-svm = { path = "../../svm", version = "=3.1.0" } +solana-svm-callback = { path = "../../svm-callback", version = "=3.1.0" } +solana-svm-feature-set = { path = "../../svm-feature-set", version = "=3.1.0" } +solana-svm-log-collector = { path = "../../svm-log-collector", version = "=3.1.0" } +solana-svm-timings = { path = "../../svm-timings", version = "=3.1.0" } +solana-svm-transaction = { path = "../../svm-transaction", version = "=3.1.0" } +solana-svm-type-overrides = { path = "../../svm-type-overrides", version = "=3.1.0" } +solana-system-interface = { version = "=2.0", features = ["bincode"] } 
+solana-sysvar = "=3.0.0" +solana-transaction-context = { path = "../../transaction-context", version = "=3.1.0" } +solana-transaction-status = { path = "../../transaction-status", version = "=3.1.0" } +solana-vote = { path = "../../vote", version = "=3.1.0" } +solana-vote-program = { path = "../../programs/vote", version = "=3.1.0" } +test-case = "3.3.1" thiserror = "1.0" [features] @@ -192,38 +197,38 @@ itertools = { workspace = true } log = { workspace = true } miow = { workspace = true } net2 = { workspace = true } -solana-account = "2.2.1" +solana-account = "3.0.0" solana-account-decoder = { workspace = true } -solana-account-info = "2.3.0" +solana-account-info = "3.0.0" solana-accounts-db = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-cli-output = { workspace = true } -solana-client-traits = "2.2.1" -solana-clock = "2.2.1" +solana-client-traits = "3.0.0" +solana-clock = "3.0.0" +solana-cluster-type = "3.0.0" solana-compute-budget = { workspace = true } solana-compute-budget-instruction = { workspace = true, features = [ "dev-context-only-utils", ] } -solana-compute-budget-interface = "2.2.2" +solana-compute-budget-interface = "3.0.0" solana-fee = { workspace = true } -solana-fee-calculator = "2.2.1" -solana-fee-structure = "2.3.0" -solana-genesis-config = "2.2.1" -solana-hash = "2.2.1" -solana-instruction = "2.2.1" -solana-keypair = "2.2.1" +solana-fee-calculator = "3.0.0" +solana-fee-structure = "3.0.0" +solana-genesis-config = "3.0.0" +solana-hash = "3.0.0" +solana-instruction = "3.0.0" +solana-keypair = "3.0.0" solana-ledger = { workspace = true } -solana-loader-v3-interface = "5.0.0" -solana-loader-v4-interface = "2.2.1" -solana-log-collector = { workspace = true } +solana-loader-v3-interface = "6.1.0" +solana-loader-v4-interface = "3.1.0" solana-logger = { workspace = true } solana-measure = { workspace = true } -solana-message = "2.3.0" +solana-message = "3.0.0" solana-program = { workspace = true } 
-solana-program-entrypoint = "2.3.0" +solana-program-entrypoint = "3.1.0" solana-program-runtime = { workspace = true } -solana-pubkey = "2.4.0" -solana-rent = "2.2.1" +solana-pubkey = "3.0.0" +solana-rent = "3.0.0" solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-runtime-transaction = { workspace = true, features = [ "dev-context-only-utils", @@ -231,24 +236,26 @@ solana-runtime-transaction = { workspace = true, features = [ solana-sbf-rust-invoke-dep = { workspace = true } solana-sbf-rust-realloc-dep = { workspace = true } solana-sbf-rust-realloc-invoke-dep = { workspace = true } -solana-sbpf = { workspace = true } -solana-sdk-ids = "2.2.1" -solana-signer = "2.2.1" -solana-stake-interface = "1.2.1" +solana-sbpf = { workspace = true, features = ["jit"] } +solana-sdk-ids = "3.0.0" +solana-signer = "3.0.0" +solana-stake-interface = "2.0.1" solana-svm = { workspace = true } solana-svm-callback = { workspace = true } solana-svm-feature-set = { workspace = true } +solana-svm-log-collector = { workspace = true } +solana-svm-timings = { workspace = true } solana-svm-transaction = { workspace = true } -solana-system-interface = "1.0" -solana-sysvar = "2.2.1" -solana-timings = { workspace = true } -solana-transaction = "2.2.2" +solana-svm-type-overrides = { workspace = true } +solana-system-interface = "2.0" +solana-sysvar = "3.0.0" +solana-transaction = "3.0.0" solana-transaction-context = { workspace = true, features = ["dev-context-only-utils"] } -solana-transaction-error = "2.2.1" +solana-transaction-error = "3.0.0" solana-transaction-status = { workspace = true } -solana-type-overrides = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } +test-case = { workspace = true } [profile.release] # The test programs are build in release mode diff --git a/programs/sbf/Makefile b/programs/sbf/Makefile index d92c21b8c3b0bf..7e3095a59215ac 100755 --- a/programs/sbf/Makefile +++ 
b/programs/sbf/Makefile @@ -1,6 +1,7 @@ SBF_SDK_PATH := ../../platform-tools-sdk/sbf SRC_DIR := c/src OUT_DIR := target/deploy +TOOLCHAIN := 1.84.1-sbpf-solana-v1.51 test-v3: mkdir -p target/deploy ; \ @@ -30,11 +31,11 @@ test-version: $(MAKE) test-all rust-v0: - cargo +1.84.1-sbpf-solana-v1.50 build --release --target sbpf-solana-solana --workspace ; \ + cargo +$(TOOLCHAIN) build --release --target sbpf-solana-solana --workspace ; \ cp -r target/sbpf-solana-solana/release/* target/deploy rust-new: - RUSTFLAGS="-C instrument-coverage=no" cargo +1.84.1-sbpf-solana-v1.50 build --release --target sbpf$(VER)-solana-solana --workspace ; \ + RUSTFLAGS="-C instrument-coverage=no" cargo +$(TOOLCHAIN) build --release --target sbpf$(VER)-solana-solana --workspace ; \ cp -r target/sbpf$(VER)-solana-solana/release/* target/deploy .PHONY: test-v3 diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index 4866f53213881e..5f6eada4f2c4fc 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -61,7 +61,7 @@ macro_rules! with_mock_invoke_context { AccountSharedData::new(2, $account_size, &program_key), ), ]; - let instruction_accounts = vec![InstructionAccount::new(2, 0, false, true)]; + let instruction_accounts = vec![InstructionAccount::new(2, false, true)]; solana_program_runtime::with_mock_invoke_context!( $invoke_context, transaction_context, @@ -69,9 +69,8 @@ macro_rules! 
with_mock_invoke_context { ); $invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![0, 1], instruction_accounts, &[]); + .configure_next_instruction_for_tests(1, instruction_accounts, &[]) + .unwrap(); $invoke_context.push().unwrap(); }; } @@ -152,7 +151,10 @@ fn bench_program_alu(bencher: &mut Bencher) { assert!(0f64 != summary.median); let mips = (instructions * (ns_per_s / summary.median as u64)) / one_million; println!(" {:?} MIPS", mips); - println!("{{ \"type\": \"bench\", \"name\": \"bench_program_alu_interpreted_mips\", \"median\": {:?}, \"deviation\": 0 }}", mips); + println!( + "{{ \"type\": \"bench\", \"name\": \"bench_program_alu_interpreted_mips\", \"median\": \ + {mips:?}, \"deviation\": 0 }}", + ); println!("JIT to native:"); assert_eq!(SUCCESS, vm.execute_program(&executable, false).1.unwrap()); @@ -173,7 +175,10 @@ fn bench_program_alu(bencher: &mut Bencher) { assert!(0f64 != summary.median); let mips = (instructions * (ns_per_s / summary.median as u64)) / one_million; println!(" {:?} MIPS", mips); - println!("{{ \"type\": \"bench\", \"name\": \"bench_program_alu_jit_to_native_mips\", \"median\": {:?}, \"deviation\": 0 }}", mips); + println!( + "{{ \"type\": \"bench\", \"name\": \"bench_program_alu_jit_to_native_mips\", \"median\": \ + {mips:?}, \"deviation\": 0 }}", + ); } #[bench] @@ -227,6 +232,7 @@ fn bench_create_vm(bencher: &mut Bencher) { let stricter_abi_and_runtime_constraints = invoke_context .get_feature_set() .stricter_abi_and_runtime_constraints; + let account_data_direct_mapping = invoke_context.get_feature_set().account_data_direct_mapping; let raise_cpi_nesting_limit_to_8 = invoke_context .get_feature_set() .raise_cpi_nesting_limit_to_8; @@ -243,15 +249,14 @@ fn bench_create_vm(bencher: &mut Bencher) { executable.verify::().unwrap(); // Serialize account data - let (_serialized, regions, account_lengths) = serialize_parameters( - invoke_context.transaction_context, - invoke_context 
+ let (_serialized, regions, account_lengths, _instruction_data_offset) = serialize_parameters( + &invoke_context .transaction_context .get_current_instruction_context() .unwrap(), stricter_abi_and_runtime_constraints, - false, // account_data_direct_mapping - true, // mask_out_rent_epoch_in_vm_serialization + account_data_direct_mapping, + true, // mask_out_rent_epoch_in_vm_serialization ) .unwrap(); @@ -277,17 +282,17 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { let stricter_abi_and_runtime_constraints = invoke_context .get_feature_set() .stricter_abi_and_runtime_constraints; + let account_data_direct_mapping = invoke_context.get_feature_set().account_data_direct_mapping; // Serialize account data - let (_serialized, regions, account_lengths) = serialize_parameters( - invoke_context.transaction_context, - invoke_context + let (_serialized, regions, account_lengths, _instruction_data_offset) = serialize_parameters( + &invoke_context .transaction_context .get_current_instruction_context() .unwrap(), stricter_abi_and_runtime_constraints, - false, // account_data_direct_mapping - true, // mask_out_rent_epoch_in_vm_serialization + account_data_direct_mapping, + true, // mask_out_rent_epoch_in_vm_serialization ) .unwrap(); diff --git a/programs/sbf/c/src/invoke/invoke.c b/programs/sbf/c/src/invoke/invoke.c index 559e28fb11c33f..c4b28ad42f9178 100644 --- a/programs/sbf/c/src/invoke/invoke.c +++ b/programs/sbf/c/src/invoke/invoke.c @@ -40,8 +40,8 @@ static const uint8_t TEST_CPI_INVALID_LAMPORTS_POINTER = 37; static const uint8_t TEST_CPI_INVALID_DATA_POINTER = 38; static const uint8_t TEST_WRITE_ACCOUNT = 40; static const uint8_t TEST_ACCOUNT_INFO_IN_ACCOUNT = 43; -static const uint8_t TEST_NESTED_INVOKE_SIMD_0296_OK = 46; -static const uint8_t TEST_NESTED_INVOKE_SIMD_0296_TOO_DEEP = 47; +static const uint8_t TEST_NESTED_INVOKE_SIMD_0268_OK = 46; +static const uint8_t TEST_NESTED_INVOKE_SIMD_0268_TOO_DEEP = 47; static const int MINT_INDEX = 0; static 
const int ARGUMENT_INDEX = 1; @@ -629,11 +629,11 @@ extern uint64_t entrypoint(const uint8_t *input) { do_nested_invokes(5, accounts, params.ka_num); break; } - case TEST_NESTED_INVOKE_SIMD_0296_OK: { + case TEST_NESTED_INVOKE_SIMD_0268_OK: { do_nested_invokes(8, accounts, params.ka_num); break; } - case TEST_NESTED_INVOKE_SIMD_0296_TOO_DEEP: { + case TEST_NESTED_INVOKE_SIMD_0268_TOO_DEEP: { do_nested_invokes(9, accounts, params.ka_num); break; } diff --git a/programs/sbf/rust/account_mem/src/lib.rs b/programs/sbf/rust/account_mem/src/lib.rs index 7d20446b9f0101..3cbc0857b3453d 100644 --- a/programs/sbf/rust/account_mem/src/lib.rs +++ b/programs/sbf/rust/account_mem/src/lib.rs @@ -27,107 +27,103 @@ pub fn process_instruction( unsafe { std::slice::from_raw_parts_mut(data, data_len) } }; - match instruction_data[0] { - 0 => { - // memcmp overlaps begining - #[allow(clippy::manual_memcpy)] - for i in 0..500 { - buf[i] = too_early(8)[i]; - } + unsafe { + match instruction_data[0] { + 0 => { + // memcmp overlaps begining + #[allow(clippy::manual_memcpy)] + for i in 0..500 { + buf[i] = too_early(8)[i]; + } - sol_memcmp(too_early(8), &buf, 500); - } - 1 => { - // memcmp overlaps begining - #[allow(clippy::manual_memcpy)] - for i in 0..12 { - buf[i] = too_early(9)[i]; + sol_memcmp(too_early(8), &buf, 500); } + 1 => { + // memcmp overlaps begining + #[allow(clippy::manual_memcpy)] + for i in 0..12 { + buf[i] = too_early(9)[i]; + } - sol_memcmp(&buf, too_early(9), 12); - } - 2 => { - // memcmp overlaps begining - #[allow(clippy::manual_memcpy)] - for i in 0..3 { - buf[i] = too_early(2)[i]; + sol_memcmp(&buf, too_early(9), 12); } + 2 => { + // memcmp overlaps begining + #[allow(clippy::manual_memcpy)] + for i in 0..3 { + buf[i] = too_early(2)[i]; + } - // memset overlaps begin of account area - sol_memset(too_early(2), 3, 3); - sol_memcpy(too_early(2), &buf, 3); - } - 3 => { - // memcpy src overlaps begin of account - sol_memcpy(&mut buf, too_early(3), 10); - } - 4 => { - // 
memmov src overlaps begin of account - unsafe { sol_memmove(buf.as_mut_ptr(), too_early(3).as_ptr(), 10) }; - } - 5 => { - // memcpy dst overlaps begin of account - sol_memcpy(too_early(3), &buf, 10); - } - 6 => { - // memmov dst overlaps begin of account - unsafe { sol_memmove(too_early(3).as_mut_ptr(), buf.as_ptr(), 10) }; - } - 7 => { - // memmove dst overlaps begin of account, reverse order - unsafe { sol_memmove(too_early(0).as_mut_ptr(), too_early(3).as_ptr(), 10) }; - } - 8 => { - // memcmp overlaps end - sol_memcmp(&buf, &data[data_len.saturating_sub(8)..], 16); - } - 9 => { - // memcmp overlaps end - sol_memcmp(&data[data_len.saturating_sub(7)..], &buf, 15); - } - 10 => { - // memset overlaps end of account - sol_memset(&mut data[data_len.saturating_sub(2)..], 0, 3); - } - 11 => { - // memcpy src overlaps end of account - sol_memcpy(&mut buf, &data[data_len.saturating_sub(3)..], 10); - } - 12 => { - // memmov src overlaps end of account - unsafe { + // memset overlaps begin of account area + sol_memset(too_early(2), 3, 3); + sol_memcpy(too_early(2), &buf, 3); + } + 3 => { + // memcpy src overlaps begin of account + sol_memcpy(&mut buf, too_early(3), 10); + } + 4 => { + // memmov src overlaps begin of account + sol_memmove(buf.as_mut_ptr(), too_early(3).as_ptr(), 10); + } + 5 => { + // memcpy dst overlaps begin of account + sol_memcpy(too_early(3), &buf, 10); + } + 6 => { + // memmov dst overlaps begin of account + sol_memmove(too_early(3).as_mut_ptr(), buf.as_ptr(), 10); + } + 7 => { + // memmove dst overlaps begin of account, reverse order + sol_memmove(too_early(0).as_mut_ptr(), too_early(3).as_ptr(), 10); + } + 8 => { + // memcmp overlaps end + sol_memcmp(&buf, &data[data_len.saturating_sub(8)..], 16); + } + 9 => { + // memcmp overlaps end + sol_memcmp(&data[data_len.saturating_sub(7)..], &buf, 15); + } + 10 => { + // memset overlaps end of account + sol_memset(&mut data[data_len.saturating_sub(2)..], 0, 3); + } + 11 => { + // memcpy src overlaps end of 
account + sol_memcpy(&mut buf, &data[data_len.saturating_sub(3)..], 10); + } + 12 => { + // memmov src overlaps end of account sol_memmove( buf.as_mut_ptr(), data[data_len.saturating_sub(3)..].as_ptr(), 10, - ) - }; - } - 13 => { - // memcpy dst overlaps end of account - sol_memcpy(&mut data[data_len.saturating_sub(3)..], &buf, 10); - } - 14 => { - // memmov dst overlaps end of account - unsafe { + ); + } + 13 => { + // memcpy dst overlaps end of account + sol_memcpy(&mut data[data_len.saturating_sub(3)..], &buf, 10); + } + 14 => { + // memmov dst overlaps end of account sol_memmove( data[data_len.saturating_sub(3)..].as_mut_ptr(), buf.as_ptr(), 10, - ) - }; - } - 15 => { - // memmove dst overlaps end of account, reverse order - unsafe { + ); + } + 15 => { + // memmove dst overlaps end of account, reverse order sol_memmove( data[data_len..].as_mut_ptr(), data[data_len.saturating_sub(3)..].as_mut_ptr(), 10, - ) - }; + ); + } + _ => {} } - _ => {} } Ok(()) diff --git a/programs/sbf/rust/account_mem_deprecated/src/lib.rs b/programs/sbf/rust/account_mem_deprecated/src/lib.rs index 4a95e6b8202618..7719cf949b64ca 100644 --- a/programs/sbf/rust/account_mem_deprecated/src/lib.rs +++ b/programs/sbf/rust/account_mem_deprecated/src/lib.rs @@ -27,100 +27,96 @@ pub fn process_instruction( unsafe { std::slice::from_raw_parts_mut(data, data_len.wrapping_add(100)) } }; - match instruction_data[0] { - 0 => { - // memcmp overlaps begining - #[allow(clippy::manual_memcpy)] - for i in 0..90 { - buf[i] = too_early(8)[i]; - } + unsafe { + match instruction_data[0] { + 0 => { + // memcmp overlaps begining + #[allow(clippy::manual_memcpy)] + for i in 0..90 { + buf[i] = too_early(8)[i]; + } - sol_memcmp(too_early(8), &buf, 90); - } - 1 => { - // memcmp overlaps begining - #[allow(clippy::manual_memcpy)] - for i in 0..12 { - buf[i] = too_early(9)[i]; + sol_memcmp(too_early(8), &buf, 90); } + 1 => { + // memcmp overlaps begining + #[allow(clippy::manual_memcpy)] + for i in 0..12 { + buf[i] = 
too_early(9)[i]; + } - sol_memcmp(&buf, too_early(9), 12); - } - 2 => { - // memset overlaps begin of account area - sol_memset(too_early(2), 3, 3); - } - 3 => { - // memcpy src overlaps begin of account - sol_memcpy(&mut buf, too_early(3), 10); - } - 4 => { - // memmov src overlaps begin of account - unsafe { sol_memmove(buf.as_mut_ptr(), too_early(3).as_ptr(), 10) }; - } - 5 => { - // memcpy dst overlaps begin of account - sol_memcpy(too_early(3), &buf, 10); - } - 6 => { - // memmov dst overlaps begin of account - unsafe { sol_memmove(too_early(3).as_mut_ptr(), buf.as_ptr(), 10) }; - } - 7 => { - // memmove dst overlaps begin of account, reverse order - unsafe { sol_memmove(too_early(0).as_mut_ptr(), too_early(3).as_ptr(), 10) }; - } - 8 => { - // memcmp overlaps end - sol_memcmp(&buf, &data[data_len.saturating_sub(8)..], 16); - } - 9 => { - // memcmp overlaps end - sol_memcmp(&data[data_len.saturating_sub(7)..], &buf, 15); - } - 10 => { - // memset overlaps end of account - sol_memset(&mut data[data_len.saturating_sub(2)..], 0, 3); - } - 11 => { - // memcpy src overlaps end of account - sol_memcpy(&mut buf, &data[data_len.saturating_sub(3)..], 10); - } - 12 => { - // memmov src overlaps end of account - unsafe { + sol_memcmp(&buf, too_early(9), 12); + } + 2 => { + // memset overlaps begin of account area + sol_memset(too_early(2), 3, 3); + } + 3 => { + // memcpy src overlaps begin of account + sol_memcpy(&mut buf, too_early(3), 10); + } + 4 => { + // memmov src overlaps begin of account + sol_memmove(buf.as_mut_ptr(), too_early(3).as_ptr(), 10); + } + 5 => { + // memcpy dst overlaps begin of account + sol_memcpy(too_early(3), &buf, 10); + } + 6 => { + // memmov dst overlaps begin of account + sol_memmove(too_early(3).as_mut_ptr(), buf.as_ptr(), 10); + } + 7 => { + // memmove dst overlaps begin of account, reverse order + sol_memmove(too_early(0).as_mut_ptr(), too_early(3).as_ptr(), 10); + } + 8 => { + // memcmp overlaps end + sol_memcmp(&buf, 
&data[data_len.saturating_sub(8)..], 16); + } + 9 => { + // memcmp overlaps end + sol_memcmp(&data[data_len.saturating_sub(7)..], &buf, 15); + } + 10 => { + // memset overlaps end of account + sol_memset(&mut data[data_len.saturating_sub(2)..], 0, 3); + } + 11 => { + // memcpy src overlaps end of account + sol_memcpy(&mut buf, &data[data_len.saturating_sub(3)..], 10); + } + 12 => { + // memmov src overlaps end of account sol_memmove( buf.as_mut_ptr(), data[data_len.saturating_sub(3)..].as_ptr(), 10, - ) - }; - } - 13 => { - // memcpy dst overlaps end of account - sol_memcpy(&mut data[data_len.saturating_sub(3)..], &buf, 10); - } - 14 => { - // memmov dst overlaps end of account - unsafe { + ); + } + 13 => { + // memcpy dst overlaps end of account + sol_memcpy(&mut data[data_len.saturating_sub(3)..], &buf, 10); + } + 14 => { + // memmov dst overlaps end of account sol_memmove( data[data_len.saturating_sub(3)..].as_mut_ptr(), buf.as_ptr(), 10, - ) - }; - } - 15 => { - // memmove dst overlaps end of account, reverse order - unsafe { + ); + } + 15 => { + // memmove dst overlaps end of account, reverse order sol_memmove( data[data_len..].as_mut_ptr(), data[data_len.saturating_sub(3)..].as_mut_ptr(), 10, - ) - }; + ); + } + _ => {} } - _ => {} } Ok(()) diff --git a/programs/sbf/rust/caller_access/src/lib.rs b/programs/sbf/rust/caller_access/src/lib.rs index 69cee1fd8c570d..0cd5363d7a721a 100644 --- a/programs/sbf/rust/caller_access/src/lib.rs +++ b/programs/sbf/rust/caller_access/src/lib.rs @@ -20,8 +20,7 @@ fn process_instruction( let mut lamports = accounts[0].lamports(); let owner = &accounts[0].owner; let mut data = accounts[0].try_borrow_mut_data()?; - let account = - AccountInfo::new(&key, false, false, &mut lamports, &mut data, owner, true, 0); + let account = AccountInfo::new(&key, false, false, &mut lamports, &mut data, owner, true); msg!("{:?} calling {:?}", program_id, key); invoke(&ix, &[account])?; } else { diff --git a/programs/sbf/rust/dep_crate/src/lib.rs 
b/programs/sbf/rust/dep_crate/src/lib.rs index e8b8365e1eaae6..ef21fee51957ef 100644 --- a/programs/sbf/rust/dep_crate/src/lib.rs +++ b/programs/sbf/rust/dep_crate/src/lib.rs @@ -9,10 +9,12 @@ use { pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { let mut buf = [0; 4]; LittleEndian::write_u32(&mut buf, 1_000_000); + std::hint::black_box(&mut buf); assert_eq!(1_000_000, LittleEndian::read_u32(&buf)); let mut buf = [0; 2]; LittleEndian::write_i16(&mut buf, -5_000); + std::hint::black_box(&mut buf); assert_eq!(-5_000, LittleEndian::read_i16(&buf)); SUCCESS diff --git a/programs/sbf/rust/deprecated_loader/src/lib.rs b/programs/sbf/rust/deprecated_loader/src/lib.rs index 872fd73aa7ae17..0c78c2e19e453d 100644 --- a/programs/sbf/rust/deprecated_loader/src/lib.rs +++ b/programs/sbf/rust/deprecated_loader/src/lib.rs @@ -136,7 +136,10 @@ fn process_instruction( } } Some(&TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN) => { - msg!("DEPRECATED LOADER TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN"); + msg!( + "DEPRECATED LOADER \ + TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN" + ); const ARGUMENT_INDEX: usize = 1; const REALLOC_PROGRAM_INDEX: usize = 2; const INVOKE_PROGRAM_INDEX: usize = 3; diff --git a/programs/sbf/rust/error_handling/src/lib.rs b/programs/sbf/rust/error_handling/src/lib.rs index 91ec09378ee3f6..b60fdf49973038 100644 --- a/programs/sbf/rust/error_handling/src/lib.rs +++ b/programs/sbf/rust/error_handling/src/lib.rs @@ -23,10 +23,7 @@ impl From for ProgramError { } } impl ToStr for MyError { - fn to_str(&self) -> &'static str - where - E: 'static + ToStr + TryFrom, - { + fn to_str(&self) -> &'static str { match self { MyError::DefaultEnumStart => "Error: Default enum start", MyError::TheAnswer => "Error: The Answer", diff --git a/programs/sbf/rust/invoke/src/lib.rs b/programs/sbf/rust/invoke/src/lib.rs index 0949e58d0a0564..6b8d2edaea42d9 100644 --- a/programs/sbf/rust/invoke/src/lib.rs +++ 
b/programs/sbf/rust/invoke/src/lib.rs @@ -538,7 +538,7 @@ fn process_instruction<'a>( let data = unsafe { std::slice::from_raw_parts_mut(ptr, len) }; let mut lamports = accounts[FROM_INDEX].lamports(); let from_info = - AccountInfo::new(&pubkey, false, true, &mut lamports, data, &owner, false, 0); + AccountInfo::new(&pubkey, false, true, &mut lamports, data, &owner, false); let pubkey = *accounts[DERIVED_KEY1_INDEX].key; let owner = *accounts[DERIVED_KEY1_INDEX].owner; @@ -546,7 +546,7 @@ fn process_instruction<'a>( let data = unsafe { std::slice::from_raw_parts_mut(0x300007ff8 as *mut _, 0) }; let mut lamports = accounts[DERIVED_KEY1_INDEX].lamports(); let derived_info = - AccountInfo::new(&pubkey, false, true, &mut lamports, data, &owner, false, 0); + AccountInfo::new(&pubkey, false, true, &mut lamports, data, &owner, false); let pubkey = *accounts[SYSTEM_PROGRAM_INDEX].key; let owner = *accounts[SYSTEM_PROGRAM_INDEX].owner; @@ -555,7 +555,7 @@ fn process_instruction<'a>( let data = unsafe { std::slice::from_raw_parts_mut(ptr, len) }; let mut lamports = accounts[SYSTEM_PROGRAM_INDEX].lamports(); let system_info = - AccountInfo::new(&pubkey, false, false, &mut lamports, data, &owner, true, 0); + AccountInfo::new(&pubkey, false, false, &mut lamports, data, &owner, true); let instruction = system_instruction::create_account( accounts[FROM_INDEX].key, @@ -652,12 +652,12 @@ fn process_instruction<'a>( TEST_NESTED_INVOKE_TOO_DEEP => { let _ = do_nested_invokes(5, accounts); } - TEST_NESTED_INVOKE_SIMD_0296_OK => { - // Test that 8 nested invokes succeed with SIMD-0296 enabled + TEST_NESTED_INVOKE_SIMD_0268_OK => { + // Test that 8 nested invokes succeed with SIMD-0268 enabled let _ = do_nested_invokes(8, accounts); } - TEST_NESTED_INVOKE_SIMD_0296_TOO_DEEP => { - // Test that 9 nested invokes fail even with SIMD-0296 enabled + TEST_NESTED_INVOKE_SIMD_0268_TOO_DEEP => { + // Test that 9 nested invokes fail even with SIMD-0268 enabled let _ = do_nested_invokes(9, 
accounts); } TEST_CALL_PRECOMPILE => { @@ -1379,7 +1379,7 @@ fn process_instruction<'a>( #[cfg(target_feature = "dynamic-frames")] // When we have dynamic frames, the stack grows from the higher addresses, so we // compare from zero until the beginning of a function frame. - { + unsafe { const ZEROED_BYTES_LENGTH: usize = (MAX_CALL_DEPTH - 2) * STACK_FRAME_SIZE; assert_eq!(sol_memcmp(stack, &ZEROS, ZEROED_BYTES_LENGTH), 0); stack[..ZEROED_BYTES_LENGTH].fill(42); diff --git a/programs/sbf/rust/invoke_dep/src/lib.rs b/programs/sbf/rust/invoke_dep/src/lib.rs index 685ac9c0ceb9f0..c965ef18660437 100644 --- a/programs/sbf/rust/invoke_dep/src/lib.rs +++ b/programs/sbf/rust/invoke_dep/src/lib.rs @@ -45,8 +45,8 @@ pub const TEST_STACK_HEAP_ZEROED: u8 = 42; pub const TEST_ACCOUNT_INFO_IN_ACCOUNT: u8 = 43; pub const TEST_ACCOUNT_INFO_LAMPORTS_RC: u8 = 44; pub const TEST_ACCOUNT_INFO_DATA_RC: u8 = 45; -pub const TEST_NESTED_INVOKE_SIMD_0296_OK: u8 = 46; -pub const TEST_NESTED_INVOKE_SIMD_0296_TOO_DEEP: u8 = 47; +pub const TEST_NESTED_INVOKE_SIMD_0268_OK: u8 = 46; +pub const TEST_NESTED_INVOKE_SIMD_0268_TOO_DEEP: u8 = 47; pub const MINT_INDEX: usize = 0; pub const ARGUMENT_INDEX: usize = 1; diff --git a/programs/sbf/rust/invoked/src/lib.rs b/programs/sbf/rust/invoked/src/lib.rs index 2d577803c9d153..1262ccc83a3e51 100644 --- a/programs/sbf/rust/invoked/src/lib.rs +++ b/programs/sbf/rust/invoked/src/lib.rs @@ -47,7 +47,6 @@ fn process_instruction( assert_eq!(accounts[ARGUMENT_INDEX].data_len(), 100); assert!(accounts[ARGUMENT_INDEX].is_signer); assert!(accounts[ARGUMENT_INDEX].is_writable); - assert_eq!(accounts[ARGUMENT_INDEX].rent_epoch, u64::MAX); assert!(!accounts[ARGUMENT_INDEX].executable); { let data = accounts[ARGUMENT_INDEX].try_borrow_data()?; @@ -64,14 +63,12 @@ fn process_instruction( assert_eq!(accounts[INVOKED_ARGUMENT_INDEX].data_len(), 10); assert!(accounts[INVOKED_ARGUMENT_INDEX].is_signer); assert!(accounts[INVOKED_ARGUMENT_INDEX].is_writable); - 
assert_eq!(accounts[INVOKED_ARGUMENT_INDEX].rent_epoch, u64::MAX); assert!(!accounts[INVOKED_ARGUMENT_INDEX].executable); assert_eq!(accounts[INVOKED_PROGRAM_INDEX].key, program_id); assert_eq!(accounts[INVOKED_PROGRAM_INDEX].owner, &loader_v4::id()); assert!(!accounts[INVOKED_PROGRAM_INDEX].is_signer); assert!(!accounts[INVOKED_PROGRAM_INDEX].is_writable); - assert_eq!(accounts[INVOKED_PROGRAM_INDEX].rent_epoch, u64::MAX); assert!(accounts[INVOKED_PROGRAM_INDEX].executable); assert_eq!( @@ -94,10 +91,13 @@ fn process_instruction( accounts[INVOKED_PROGRAM_INDEX].is_writable, accounts[INVOKED_PROGRAM_DUP_INDEX].is_writable ); - assert_eq!( - accounts[INVOKED_PROGRAM_INDEX].rent_epoch, - accounts[INVOKED_PROGRAM_DUP_INDEX].rent_epoch - ); + #[allow(deprecated)] + { + assert_eq!( + accounts[INVOKED_PROGRAM_INDEX]._unused, + accounts[INVOKED_PROGRAM_DUP_INDEX]._unused + ); + } assert_eq!( accounts[INVOKED_PROGRAM_INDEX].executable, accounts[INVOKED_PROGRAM_DUP_INDEX].executable diff --git a/programs/sbf/rust/mem/src/lib.rs b/programs/sbf/rust/mem/src/lib.rs index 0fb4aac820bd34..1c305469b4cf8a 100644 --- a/programs/sbf/rust/mem/src/lib.rs +++ b/programs/sbf/rust/mem/src/lib.rs @@ -18,16 +18,16 @@ pub fn process_instruction( #[derive(Default)] struct MemOpSyscalls(); impl MemOps for MemOpSyscalls { - fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize) { + unsafe fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize) { sol_memcpy(dst, src, n) } unsafe fn memmove(&self, dst: *mut u8, src: *mut u8, n: usize) { sol_memmove(dst, src, n) } - fn memset(&self, s: &mut [u8], c: u8, n: usize) { + unsafe fn memset(&self, s: &mut [u8], c: u8, n: usize) { sol_memset(s, c, n) } - fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32 { + unsafe fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32 { sol_memcmp(s1, s2, n) } } diff --git a/programs/sbf/rust/mem_dep/src/lib.rs b/programs/sbf/rust/mem_dep/src/lib.rs index 42624569d3726b..09bee16265b789 100644 --- 
a/programs/sbf/rust/mem_dep/src/lib.rs +++ b/programs/sbf/rust/mem_dep/src/lib.rs @@ -1,40 +1,45 @@ //! Test mem functions pub trait MemOps { - fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize); + /// # Safety + unsafe fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize); /// # Safety unsafe fn memmove(&self, dst: *mut u8, src: *mut u8, n: usize); - fn memset(&self, s: &mut [u8], c: u8, n: usize); - fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32; + /// # Safety + unsafe fn memset(&self, s: &mut [u8], c: u8, n: usize); + /// # Safety + unsafe fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32; } pub fn run_mem_tests(mem_ops: T) { // memcpy - let src = &[1_u8; 18]; - let dst = &mut [0_u8; 1]; - mem_ops.memcpy(dst, src, 1); - assert_eq!(&src[..1], dst); - let dst = &mut [0_u8; 3]; - mem_ops.memcpy(dst, src, 3); - assert_eq!(&src[..3], dst); - let dst = &mut [0_u8; 8]; - mem_ops.memcpy(dst, src, 8); - assert_eq!(&src[..8], dst); - let dst = &mut [0_u8; 9]; - mem_ops.memcpy(dst, src, 9); - assert_eq!(&src[..9], dst); - let dst = &mut [0_u8; 16]; - mem_ops.memcpy(dst, src, 16); - assert_eq!(&src[..16], dst); - let dst = &mut [0_u8; 18]; - mem_ops.memcpy(dst, src, 18); - assert_eq!(&src[..18], dst); - let dst = &mut [0_u8; 18]; - mem_ops.memcpy(dst, &src[1..], 17); - assert_eq!(&src[1..], &dst[..17]); - let dst = &mut [0_u8; 18]; - mem_ops.memcpy(&mut dst[1..], &src[1..], 17); - assert_eq!(&src[1..], &dst[1..]); + unsafe { + let src = &[1_u8; 18]; + let dst = &mut [0_u8; 1]; + mem_ops.memcpy(dst, src, 1); + assert_eq!(&src[..1], dst); + let dst = &mut [0_u8; 3]; + mem_ops.memcpy(dst, src, 3); + assert_eq!(&src[..3], dst); + let dst = &mut [0_u8; 8]; + mem_ops.memcpy(dst, src, 8); + assert_eq!(&src[..8], dst); + let dst = &mut [0_u8; 9]; + mem_ops.memcpy(dst, src, 9); + assert_eq!(&src[..9], dst); + let dst = &mut [0_u8; 16]; + mem_ops.memcpy(dst, src, 16); + assert_eq!(&src[..16], dst); + let dst = &mut [0_u8; 18]; + mem_ops.memcpy(dst, src, 18); + 
assert_eq!(&src[..18], dst); + let dst = &mut [0_u8; 18]; + mem_ops.memcpy(dst, &src[1..], 17); + assert_eq!(&src[1..], &dst[..17]); + let dst = &mut [0_u8; 18]; + mem_ops.memcpy(&mut dst[1..], &src[1..], 17); + assert_eq!(&src[1..], &dst[1..]); + } // memmove unsafe { @@ -107,56 +112,60 @@ pub fn run_mem_tests(mem_ops: T) { } // memset - let exp = &[1_u8; 18]; - let buf = &mut [0_u8; 18]; - mem_ops.memset(&mut buf[0..], 1, 1); - assert_eq!(exp[..1], buf[..1]); - mem_ops.memset(&mut buf[0..], 1, 3); - assert_eq!(exp[..3], buf[..3]); - mem_ops.memset(&mut buf[0..], 1, 8); - assert_eq!(exp[..8], buf[..8]); - mem_ops.memset(&mut buf[0..], 1, 9); - assert_eq!(exp[..9], buf[..9]); - mem_ops.memset(&mut buf[0..], 1, 16); - assert_eq!(exp[..16], buf[..16]); - mem_ops.memset(&mut buf[0..], 1, 18); - assert_eq!(exp[..18], buf[..18]); - mem_ops.memset(&mut buf[1..], 1, 17); - assert_eq!(exp[1..18], buf[1..18]); + unsafe { + let exp = &[1_u8; 18]; + let buf = &mut [0_u8; 18]; + mem_ops.memset(&mut buf[0..], 1, 1); + assert_eq!(exp[..1], buf[..1]); + mem_ops.memset(&mut buf[0..], 1, 3); + assert_eq!(exp[..3], buf[..3]); + mem_ops.memset(&mut buf[0..], 1, 8); + assert_eq!(exp[..8], buf[..8]); + mem_ops.memset(&mut buf[0..], 1, 9); + assert_eq!(exp[..9], buf[..9]); + mem_ops.memset(&mut buf[0..], 1, 16); + assert_eq!(exp[..16], buf[..16]); + mem_ops.memset(&mut buf[0..], 1, 18); + assert_eq!(exp[..18], buf[..18]); + mem_ops.memset(&mut buf[1..], 1, 17); + assert_eq!(exp[1..18], buf[1..18]); + } // memcmp - assert_eq!(-1, mem_ops.memcmp(&[0_u8], &[1_u8], 1)); - assert_eq!(-1, mem_ops.memcmp(&[0_u8, 0, 0], &[0_u8, 0, 1], 3)); - assert_eq!( - 0, - mem_ops.memcmp( - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0], - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0], - 9 - ) - ); - assert_eq!( - -1, - mem_ops.memcmp( - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0], - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 1], - 9 - ) - ); - assert_eq!( - -1, - mem_ops.memcmp( - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0, 0], - &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0, 1], - 10 - 
) - ); - assert_eq!(0, mem_ops.memcmp(&[0_u8; 8], &[0_u8; 8], 8)); - assert_eq!(-1, mem_ops.memcmp(&[0_u8; 8], &[1_u8; 8], 8)); - assert_eq!(-1, mem_ops.memcmp(&[0_u8; 16], &[1_u8; 16], 16)); - assert_eq!(-1, mem_ops.memcmp(&[0_u8; 18], &[1_u8; 18], 18)); - let one = &[0_u8; 18]; - let two = &[1_u8; 18]; - assert_eq!(-1, mem_ops.memcmp(&one[1..], &two[0..], 17)); - assert_eq!(-1, mem_ops.memcmp(&one[1..], &two[1..], 17)); + unsafe { + assert_eq!(-1, mem_ops.memcmp(&[0_u8], &[1_u8], 1)); + assert_eq!(-1, mem_ops.memcmp(&[0_u8, 0, 0], &[0_u8, 0, 1], 3)); + assert_eq!( + 0, + mem_ops.memcmp( + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0], + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0], + 9 + ) + ); + assert_eq!( + -1, + mem_ops.memcmp( + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0], + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 1], + 9 + ) + ); + assert_eq!( + -1, + mem_ops.memcmp( + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0, 0], + &[0_u8, 0, 0, 0, 0, 0, 0, 0, 0, 1], + 10 + ) + ); + assert_eq!(0, mem_ops.memcmp(&[0_u8; 8], &[0_u8; 8], 8)); + assert_eq!(-1, mem_ops.memcmp(&[0_u8; 8], &[1_u8; 8], 8)); + assert_eq!(-1, mem_ops.memcmp(&[0_u8; 16], &[1_u8; 16], 16)); + assert_eq!(-1, mem_ops.memcmp(&[0_u8; 18], &[1_u8; 18], 18)); + let one = &[0_u8; 18]; + let two = &[1_u8; 18]; + assert_eq!(-1, mem_ops.memcmp(&one[1..], &two[0..], 17)); + assert_eq!(-1, mem_ops.memcmp(&one[1..], &two[1..], 17)); + } } diff --git a/programs/sbf/rust/membuiltins/src/lib.rs b/programs/sbf/rust/membuiltins/src/lib.rs index da41a6634673a4..037f85c2df859a 100644 --- a/programs/sbf/rust/membuiltins/src/lib.rs +++ b/programs/sbf/rust/membuiltins/src/lib.rs @@ -14,7 +14,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { #[derive(Default)] struct MemOpSyscalls(); impl MemOps for MemOpSyscalls { - fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize) { + unsafe fn memcpy(&self, dst: &mut [u8], src: &[u8], n: usize) { unsafe { compiler_builtins::mem::memcpy(dst.as_mut_ptr(), src.as_ptr(), n); } @@ -22,12 +22,12 @@ pub extern "C" fn entrypoint(_input: 
*mut u8) -> u64 { unsafe fn memmove(&self, dst: *mut u8, src: *mut u8, n: usize) { compiler_builtins::mem::memmove(dst, src, n); } - fn memset(&self, s: &mut [u8], c: u8, n: usize) { + unsafe fn memset(&self, s: &mut [u8], c: u8, n: usize) { unsafe { compiler_builtins::mem::memset(s.as_mut_ptr(), c as i32, n); } } - fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32 { + unsafe fn memcmp(&self, s1: &[u8], s2: &[u8], n: usize) -> i32 { unsafe { compiler_builtins::mem::memcmp(s1.as_ptr(), s2.as_ptr(), n) } } } diff --git a/programs/sbf/rust/r2_instruction_data_pointer/Cargo.toml b/programs/sbf/rust/r2_instruction_data_pointer/Cargo.toml new file mode 100644 index 00000000000000..ecc7184fc64ba8 --- /dev/null +++ b/programs/sbf/rust/r2_instruction_data_pointer/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "solana-sbf-rust-r2-instruction-data-pointer" +version = { workspace = true } +description = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[lib] +crate-type = ["cdylib"] + +[dependencies] +solana-cpi = { workspace = true } +solana-program-entrypoint = { workspace = true } + +[lints] +workspace = true diff --git a/programs/sbf/rust/r2_instruction_data_pointer/src/lib.rs b/programs/sbf/rust/r2_instruction_data_pointer/src/lib.rs new file mode 100644 index 00000000000000..1597d282c79263 --- /dev/null +++ b/programs/sbf/rust/r2_instruction_data_pointer/src/lib.rs @@ -0,0 +1,18 @@ +//! Test program that reads instruction data using the r2 register pointer. 
+ +#![allow(clippy::arithmetic_side_effects)] +#![allow(clippy::missing_safety_doc)] + +#[no_mangle] +pub unsafe extern "C" fn entrypoint(_input: *mut u8, instruction_data_addr: *const u8) -> u64 { + let instruction_data_len = *((instruction_data_addr as u64 - 8) as *const u64); + let instruction_data = + core::slice::from_raw_parts(instruction_data_addr, instruction_data_len as usize); + + solana_cpi::set_return_data(instruction_data); + + solana_program_entrypoint::SUCCESS +} + +solana_program_entrypoint::custom_heap_default!(); +solana_program_entrypoint::custom_panic_default!(); diff --git a/programs/sbf/rust/secp256k1_recover/Cargo.toml b/programs/sbf/rust/secp256k1_recover/Cargo.toml index 6615cfb4b290cb..8760d9739132b7 100644 --- a/programs/sbf/rust/secp256k1_recover/Cargo.toml +++ b/programs/sbf/rust/secp256k1_recover/Cargo.toml @@ -13,7 +13,9 @@ crate-type = ["cdylib"] [dependencies] libsecp256k1 = { workspace = true } -solana-keccak-hasher = { workspace = true } +sha3 = { workspace = true } +solana-hash = { workspace = true } +solana-keccak-hasher = { workspace = true, features = ["sha3"] } solana-msg = { workspace = true } solana-program-entrypoint = { workspace = true } solana-secp256k1-recover = { workspace = true } diff --git a/programs/sbf/rust/secp256k1_recover/src/lib.rs b/programs/sbf/rust/secp256k1_recover/src/lib.rs index 1b749d5aef1565..b7f65f0a08b4a7 100644 --- a/programs/sbf/rust/secp256k1_recover/src/lib.rs +++ b/programs/sbf/rust/secp256k1_recover/src/lib.rs @@ -37,6 +37,14 @@ fn test_secp256k1_recover() { /// secp256k1_recover allows malleable signatures fn test_secp256k1_recover_malleability() { let message = b"hello world"; + #[cfg(target_os = "solana")] + let message_hash = { + use sha3::Digest; + let mut hasher = sha3::Keccak256::default(); + hasher.update(message); + solana_hash::Hash::new_from_array(hasher.finalize().into()) + }; + #[cfg(not(target_os = "solana"))] let message_hash = { let mut hasher = 
solana_keccak_hasher::Hasher::default(); hasher.hash(message); @@ -70,11 +78,15 @@ fn test_secp256k1_recover_malleability() { let alt_recovery_id = alt_recovery_id.serialize(); let recovered_pubkey = - secp256k1_recover(&message_hash.0, recovery_id, &signature_bytes[..]).unwrap(); + secp256k1_recover(message_hash.as_bytes(), recovery_id, &signature_bytes[..]).unwrap(); assert_eq!(recovered_pubkey.to_bytes(), pubkey_bytes); - let alt_recovered_pubkey = - secp256k1_recover(&message_hash.0, alt_recovery_id, &alt_signature_bytes[..]).unwrap(); + let alt_recovered_pubkey = secp256k1_recover( + message_hash.as_bytes(), + alt_recovery_id, + &alt_signature_bytes[..], + ) + .unwrap(); assert_eq!(alt_recovered_pubkey.to_bytes(), pubkey_bytes); } diff --git a/programs/sbf/rust/sha/Cargo.toml b/programs/sbf/rust/sha/Cargo.toml index 16fd6658852c2f..94876f0aa9d959 100644 --- a/programs/sbf/rust/sha/Cargo.toml +++ b/programs/sbf/rust/sha/Cargo.toml @@ -13,11 +13,14 @@ crate-type = ["cdylib"] [dependencies] blake3 = { workspace = true } -solana-blake3-hasher = { workspace = true } -solana-keccak-hasher = { workspace = true } +sha2 = { workspace = true } +sha3 = { workspace = true } +solana-blake3-hasher = { workspace = true, features = ["blake3"] } +solana-hash = { workspace = true } +solana-keccak-hasher = { workspace = true, features = ["sha3"] } solana-msg = { workspace = true } solana-program-entrypoint = { workspace = true } -solana-sha256-hasher = { workspace = true } +solana-sha256-hasher = { workspace = true, features = ["sha2"] } [lints] workspace = true diff --git a/programs/sbf/rust/sha/src/lib.rs b/programs/sbf/rust/sha/src/lib.rs index 3763969a13aa06..d622ebd3c03852 100644 --- a/programs/sbf/rust/sha/src/lib.rs +++ b/programs/sbf/rust/sha/src/lib.rs @@ -6,19 +6,45 @@ use { }; fn test_sha256_hasher() { - use solana_sha256_hasher::{hashv, Hasher}; + use solana_sha256_hasher::hashv; let vals = &["Gaggablaghblagh!".as_ref(), "flurbos".as_ref()]; - let mut hasher = 
Hasher::default(); - hasher.hashv(vals); - assert_eq!(hashv(vals), hasher.result()); + #[cfg(target_os = "solana")] + let hash = { + use sha2::Digest; + let mut hasher = sha2::Sha256::default(); + for val in vals { + hasher.update(val); + } + solana_hash::Hash::new_from_array(hasher.finalize().into()) + }; + #[cfg(not(target_os = "solana"))] + let hash = { + let mut hasher = solana_sha256_hasher::Hasher::default(); + hasher.hashv(vals); + hasher.result() + }; + assert_eq!(hashv(vals), hash); } fn test_keccak256_hasher() { - use solana_keccak_hasher::{hashv, Hasher}; + use solana_keccak_hasher::hashv; let vals = &["Gaggablaghblagh!".as_ref(), "flurbos".as_ref()]; - let mut hasher = Hasher::default(); - hasher.hashv(vals); - assert_eq!(hashv(vals), hasher.result()); + #[cfg(target_os = "solana")] + let hash = { + use sha3::Digest; + let mut hasher = sha3::Keccak256::default(); + for val in vals { + hasher.update(val); + } + solana_hash::Hash::new_from_array(hasher.finalize().into()) + }; + #[cfg(not(target_os = "solana"))] + let hash = { + let mut hasher = solana_keccak_hasher::Hasher::default(); + hasher.hashv(vals); + hasher.result() + }; + assert_eq!(hashv(vals), hash); } fn test_blake3_hasher() { @@ -27,7 +53,7 @@ fn test_blake3_hasher() { let v1: &[u8] = b"flurbos!"; let vals: &[&[u8]] = &[v0, v1]; let hash = blake3::hash(&[v0, v1].concat()); - assert_eq!(hashv(vals).0, *hash.as_bytes()); + assert_eq!(hashv(vals).as_bytes(), hash.as_bytes()); } #[no_mangle] diff --git a/programs/sbf/rust/simulation/src/lib.rs b/programs/sbf/rust/simulation/src/lib.rs index 8af47ee069d510..259b5a08c6c7a6 100644 --- a/programs/sbf/rust/simulation/src/lib.rs +++ b/programs/sbf/rust/simulation/src/lib.rs @@ -6,7 +6,7 @@ use { solana_msg::msg, solana_program_error::ProgramResult, solana_pubkey::{declare_id, Pubkey}, - solana_sysvar::Sysvar, + solana_sysvar::{Sysvar, SysvarSerialize}, std::convert::TryInto, }; diff --git a/programs/sbf/rust/sysvar/Cargo.toml 
b/programs/sbf/rust/sysvar/Cargo.toml index f7c9b9cd384a6e..b95f11ffcd1e3b 100644 --- a/programs/sbf/rust/sysvar/Cargo.toml +++ b/programs/sbf/rust/sysvar/Cargo.toml @@ -22,7 +22,8 @@ solana-program-entrypoint = { workspace = true } solana-program-error = { workspace = true } solana-pubkey = { workspace = true } solana-sdk-ids = { workspace = true } -solana-sysvar = { workspace = true } +solana-stake-interface = { workspace = true, features = ["sysvar"] } +solana-sysvar = { workspace = true, features = ["bincode"] } [lints] workspace = true diff --git a/programs/sbf/rust/sysvar/src/lib.rs b/programs/sbf/rust/sysvar/src/lib.rs index 9ed258ff7968e6..1994cdff6a1fb8 100644 --- a/programs/sbf/rust/sysvar/src/lib.rs +++ b/programs/sbf/rust/sysvar/src/lib.rs @@ -10,6 +10,10 @@ use { solana_program_error::{ProgramError, ProgramResult}, solana_pubkey::Pubkey, solana_sdk_ids::sysvar, + solana_stake_interface::{ + stake_history::{StakeHistory, StakeHistoryGetEntry}, + sysvar::stake_history::StakeHistorySysvar, + }, solana_sysvar::{ clock::Clock, epoch_rewards::EpochRewards, @@ -17,8 +21,7 @@ use { rent::Rent, slot_hashes::{PodSlotHashes, SlotHashes}, slot_history::SlotHistory, - stake_history::{StakeHistory, StakeHistoryGetEntry, StakeHistorySysvar}, - Sysvar, + Sysvar, SysvarSerialize, }, }; @@ -26,7 +29,7 @@ use { #[cfg(target_os = "solana")] fn sol_get_sysvar_handler(dst: &mut [u8], offset: u64, length: u64) -> Result<(), ProgramError> where - T: Sysvar, + T: SysvarSerialize, { let sysvar_id = &T::id() as *const _ as *const u8; let var_addr = dst as *mut _ as *mut u8; @@ -44,7 +47,7 @@ where // Double-helper arrangement is easier to write to a mutable slice. 
fn sol_get_sysvar() -> Result where - T: Sysvar, + T: SysvarSerialize, { #[cfg(target_os = "solana")] { @@ -206,6 +209,16 @@ pub fn process_instruction( Ok(()) } + Some(&4) => { + // Attempt to store the result in the input region instead of the stack or heap + unsafe { + solana_define_syscall::definitions::sol_get_epoch_rewards_sysvar( + accounts[2].data.borrow_mut().as_mut_ptr(), + ) + }; + + Ok(()) + } _ => Err(ProgramError::InvalidInstructionData), } } diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 7ceaf621ac4bda..7ef4dea59e59b2 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -16,18 +16,18 @@ use { solana_account_info::MAX_PERMITTED_DATA_INCREASE, solana_client_traits::SyncClient, solana_clock::{UnixTimestamp, MAX_PROCESSING_AGE}, + solana_cluster_type::ClusterType, solana_compute_budget::compute_budget::ComputeBudget, solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions, solana_compute_budget_interface::ComputeBudgetInstruction, solana_fee_calculator::FeeRateGovernor, solana_fee_structure::{FeeBin, FeeBudgetLimits, FeeStructure}, - solana_genesis_config::ClusterType, solana_hash::Hash, solana_instruction::{error::InstructionError, AccountMeta, Instruction}, solana_keypair::Keypair, solana_loader_v3_interface::instruction as loader_v3_instruction, solana_loader_v4_interface::instruction as loader_v4_instruction, - solana_message::{Message, SanitizedMessage}, + solana_message::{inner_instruction::InnerInstruction, Message, SanitizedMessage}, solana_program_runtime::invoke_context::mock_process_instruction, solana_pubkey::Pubkey, solana_rent::Rent, @@ -52,18 +52,16 @@ use { solana_sdk_ids::sysvar::{self as sysvar, clock}, solana_sdk_ids::{bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, loader_v4}, solana_signer::Signer, - solana_stake_interface as stake, solana_svm::{ transaction_commit_result::{CommittedTransaction, 
TransactionCommitResult}, - transaction_execution_result::InnerInstruction, transaction_processor::ExecutionRecordingConfig, }, + solana_svm_timings::ExecuteTimings, solana_svm_transaction::svm_message::SVMMessage, + solana_svm_type_overrides::rand, solana_system_interface::{program as system_program, MAX_PERMITTED_DATA_LENGTH}, - solana_timings::ExecuteTimings, solana_transaction::Transaction, solana_transaction_error::TransactionError, - solana_type_overrides::rand, std::{ assert_eq, cell::RefCell, @@ -71,6 +69,7 @@ use { sync::{Arc, RwLock}, time::Duration, }, + test_case::test_matrix, }; #[cfg(feature = "sbf_rust")] @@ -302,9 +301,8 @@ fn test_program_sbf_loader_deprecated() { #[test] #[cfg(feature = "sbf_rust")] -#[should_panic( - expected = "called `Result::unwrap()` on an `Err` value: TransactionError(InstructionError(0, InvalidAccountData))" -)] +#[should_panic(expected = "called `Result::unwrap()` on an `Err` value: \ + TransactionError(InstructionError(0, InvalidAccountData))")] fn test_sol_alloc_free_no_longer_deployable_with_upgradeable_loader() { solana_logger::setup(); @@ -849,7 +847,7 @@ fn test_program_sbf_invoke_sanity() { &bank, ); - // With SIMD-0296 enabled, eight nested invokes should pass. + // With SIMD-0268 enabled, eight nested invokes should pass. 
let bank = bank_with_feature_activated( &bank_forks, bank, @@ -867,7 +865,7 @@ fn test_program_sbf_invoke_sanity() { bank.store_account(&invoked_argument_keypair.pubkey(), &account); } do_invoke_success( - TEST_NESTED_INVOKE_SIMD_0296_OK, + TEST_NESTED_INVOKE_SIMD_0268_OK, &[], &[invoked_program_id.clone(); 16], // 16, 8 for each invoke &bank, @@ -999,7 +997,10 @@ fn test_program_sbf_invoke_sanity() { format!("Program log: invoke {program_lang} program"), "Program log: Test max instruction data len exceeded".into(), "skip".into(), // don't compare compute consumption logs - format!("Program {invoke_program_id} failed: Invoked an instruction with data that is too large (10241 > 10240)"), + format!( + "Program {invoke_program_id} failed: Invoked an instruction with data that is \ + too large (10241 > 10240)" + ), ]), &bank, ); @@ -1013,7 +1014,10 @@ fn test_program_sbf_invoke_sanity() { format!("Program log: invoke {program_lang} program"), "Program log: Test max instruction accounts exceeded".into(), "skip".into(), // don't compare compute consumption logs - format!("Program {invoke_program_id} failed: Invoked an instruction with too many accounts (256 > 255)"), + format!( + "Program {invoke_program_id} failed: Invoked an instruction with too many \ + accounts (256 > 255)" + ), ]), &bank, ); @@ -1027,7 +1031,10 @@ fn test_program_sbf_invoke_sanity() { format!("Program log: invoke {program_lang} program"), "Program log: Test max account infos exceeded".into(), "skip".into(), // don't compare compute consumption logs - format!("Program {invoke_program_id} failed: Invoked an instruction with too many account info's (129 > 128)"), + format!( + "Program {invoke_program_id} failed: Invoked an instruction with too many \ + account info's (129 > 128)" + ), ]), &bank, ); @@ -1065,7 +1072,7 @@ fn test_program_sbf_invoke_sanity() { &bank, ); - // With SIMD-0296 disabled, five nested invokes is too deep. + // With SIMD-0268 disabled, five nested invokes is too deep. 
let bank = bank_with_feature_deactivated( &bank_forks, bank, @@ -1088,7 +1095,7 @@ fn test_program_sbf_invoke_sanity() { &bank, ); - // With SIMD-0296 enabled, nine nested invokes is too deep. + // With SIMD-0268 enabled, nine nested invokes is too deep. let bank = bank_with_feature_activated( &bank_forks, bank, @@ -1098,7 +1105,7 @@ fn test_program_sbf_invoke_sanity() { .feature_set .is_active(&feature_set::raise_cpi_nesting_limit_to_8::id())); do_invoke_failure_test_local( - TEST_NESTED_INVOKE_SIMD_0296_TOO_DEEP, + TEST_NESTED_INVOKE_SIMD_0268_TOO_DEEP, TransactionError::InstructionError(0, InstructionError::CallDepth), &[ invoked_program_id.clone(), @@ -1428,13 +1435,13 @@ fn assert_instruction_count() { ("alloc", 18572), ("sbf_to_sbf", 316), ("multiple_static", 210), - ("noop", 6), - ("noop++", 6), + ("noop", 5), + ("noop++", 5), ("relative_call", 212), ("return_data", 1026), - ("sanity", 2374), - ("sanity++", 2274), - ("secp256k1_recover", 25422), + ("sanity", 2371), + ("sanity++", 2271), + ("secp256k1_recover", 25421), ("sha", 1446), ("struct_pass", 108), ("struct_ret", 122), @@ -1445,18 +1452,18 @@ fn assert_instruction_count() { programs.extend_from_slice(&[ ("solana_sbf_rust_128bit", 801), ("solana_sbf_rust_alloc", 4983), - ("solana_sbf_rust_custom_heap", 303), - ("solana_sbf_rust_dep_crate", 3), + ("solana_sbf_rust_custom_heap", 339), + ("solana_sbf_rust_dep_crate", 22), ("solana_sbf_rust_iter", 1414), ("solana_sbf_rust_many_args", 1287), - ("solana_sbf_rust_mem", 1298), - ("solana_sbf_rust_membuiltins", 330), - ("solana_sbf_rust_noop", 313), + ("solana_sbf_rust_mem", 1322), + ("solana_sbf_rust_membuiltins", 329), + ("solana_sbf_rust_noop", 334), ("solana_sbf_rust_param_passing", 109), - ("solana_sbf_rust_rand", 276), - ("solana_sbf_rust_sanity", 18116), - ("solana_sbf_rust_secp256k1_recover", 89274), - ("solana_sbf_rust_sha", 22811), + ("solana_sbf_rust_rand", 312), + ("solana_sbf_rust_sanity", 17902), + ("solana_sbf_rust_secp256k1_recover", 88670), + 
("solana_sbf_rust_sha", 22175), ]); } @@ -1485,7 +1492,7 @@ fn assert_instruction_count() { print!(" {:36} {:8}", program_name, *expected_consumption); mock_process_instruction( &loader_id, - vec![0], + Some(0), &[], transaction_accounts, instruction_accounts, @@ -1572,6 +1579,71 @@ fn test_program_sbf_instruction_introspection() { assert!(bank.get_account(&sysvar::instructions::id()).is_none()); } +#[test_matrix( + [0, 1, 2, 5, 10, 15, 20], + [1, 10, 50, 100, 255, 500, 1000, 1024] // MAX_RETURN_DATA = 1024 +)] +#[allow(clippy::arithmetic_side_effects)] +#[cfg(feature = "sbf_rust")] +fn test_program_sbf_r2_instruction_data_pointer(num_accounts: usize, input_data_len: usize) { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(50); + + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let mut bank_client = BankClient::new_shared(bank.clone()); + let authority_keypair = Keypair::new(); + + let (bank, program_id) = load_program_of_loader_v4( + &mut bank_client, + &bank_forks, + &mint_keypair, + &authority_keypair, + "solana_sbf_rust_r2_instruction_data_pointer", + ); + + let mut account_metas = Vec::new(); + + for i in 0..num_accounts { + let pubkey = Pubkey::new_unique(); + + // Mixed account sizes. + bank.store_account( + &pubkey, + &AccountSharedData::new(0, 100 + (i * 50), &program_id), + ); + + // Mixed account roles. + if i % 2 == 0 { + account_metas.push(AccountMeta::new(pubkey, false)); + } else { + account_metas.push(AccountMeta::new_readonly(pubkey, false)); + } + } + + bank.freeze(); + + // The provided instruction data will be set to the return data. 
+ let input_data: Vec = (0..input_data_len).map(|i| (i % 256) as u8).collect(); + + let instruction = Instruction::new_with_bytes(program_id, &input_data, account_metas); + + let blockhash = bank.last_blockhash(); + let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); + let transaction = Transaction::new(&[&mint_keypair], message, blockhash); + let sanitized_tx = RuntimeTransaction::from_transaction_for_tests(transaction); + + let result = bank.simulate_transaction(&sanitized_tx, false); + assert!(result.result.is_ok()); + + let return_data = result.return_data.unwrap().data; + assert_eq!(input_data, return_data); +} + fn get_stable_genesis_config() -> GenesisConfigInfo { let validator_pubkey = Pubkey::from_str("GLh546CXmtZdvpEzL8sxzqhhUf7KPvmGaRpFHB5W1sjV").unwrap(); @@ -1857,7 +1929,7 @@ fn test_program_sbf_invoke_in_same_tx_as_deployment() { // Asserting the instruction number as an upper bound, since the quantity of // instructions depends on the program size, which in turn depends on the SBPF // versions. - assert!(instr_no <= 41); + assert!(instr_no <= 40); assert_eq!(ty, InstructionError::UnsupportedProgramId); } else { panic!("Invalid error type"); @@ -3667,34 +3739,6 @@ fn test_program_fees() { assert_eq!(pre_balance - post_balance, expected_prioritized_fee); } -#[test] -#[cfg(feature = "sbf_rust")] -fn test_get_minimum_delegation() { - let GenesisConfigInfo { - genesis_config, - mint_keypair, - .. 
- } = create_genesis_config(100_123_456_789); - - let bank = Bank::new_for_tests(&genesis_config); - let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); - let mut bank_client = BankClient::new_shared(bank.clone()); - let authority_keypair = Keypair::new(); - - let (_bank, program_id) = load_program_of_loader_v4( - &mut bank_client, - &bank_forks, - &mint_keypair, - &authority_keypair, - "solana_sbf_rust_get_minimum_delegation", - ); - - let account_metas = vec![AccountMeta::new_readonly(stake::program::id(), false)]; - let instruction = Instruction::new_with_bytes(program_id, &[], account_metas); - let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); - assert!(result.is_ok()); -} - #[test] #[cfg(feature = "sbf_rust")] fn test_program_sbf_inner_instruction_alignment_checks() { @@ -5017,7 +5061,10 @@ fn test_clone_account_data() { let tx = Transaction::new(&[&mint_keypair], message.clone(), bank.last_blockhash()); let (result, _, logs, _) = process_transaction_and_record_inner(&bank, tx); assert!(result.is_err(), "{result:?}"); - let error = format!("Program {invoke_program_id} failed: instruction modified data of an account it does not own"); + let error = format!( + "Program {invoke_program_id} failed: instruction modified data of an account it does not \ + own" + ); assert!(logs.iter().any(|log| log.contains(&error)), "{logs:?}"); // II. 
clone data, modify and then CPI @@ -5047,7 +5094,10 @@ fn test_clone_account_data() { let tx = Transaction::new(&[&mint_keypair], message.clone(), bank.last_blockhash()); let (result, _, logs, _) = process_transaction_and_record_inner(&bank, tx); assert!(result.is_err(), "{result:?}"); - let error = format!("Program {invoke_program_id} failed: instruction modified data of an account it does not own"); + let error = format!( + "Program {invoke_program_id} failed: instruction modified data of an account it does not \ + own" + ); assert!(logs.iter().any(|log| log.contains(&error)), "{logs:?}"); // II. Clone data, call, modifiy in callee and then make the same change in the caller - transaction succeeds @@ -5344,7 +5394,11 @@ fn test_mem_syscalls_overlap_account_begin_or_end() { bank.store_account(&account_keypair.pubkey(), &account); for instr in 0..=15 { - println!("Testing deprecated:{deprecated} stricter_abi_and_runtime_constraints:{stricter_abi_and_runtime_constraints} instruction:{instr}"); + println!( + "Testing deprecated:{deprecated} \ + stricter_abi_and_runtime_constraints:{stricter_abi_and_runtime_constraints} \ + instruction:{instr}" + ); let instruction = Instruction::new_with_bytes(program_id, &[instr], account_metas.clone()); @@ -5364,7 +5418,11 @@ fn test_mem_syscalls_overlap_account_begin_or_end() { bank.store_account(&account_keypair.pubkey(), &account); for instr in 0..=15 { - println!("Testing deprecated:{deprecated} stricter_abi_and_runtime_constraints:{stricter_abi_and_runtime_constraints} instruction:{instr} zero-length account"); + println!( + "Testing deprecated:{deprecated} \ + stricter_abi_and_runtime_constraints:{stricter_abi_and_runtime_constraints} \ + instruction:{instr} zero-length account" + ); let instruction = Instruction::new_with_bytes(program_id, &[instr, 0], account_metas.clone()); diff --git a/programs/sbf/tests/simulation.rs b/programs/sbf/tests/simulation.rs index c82fc0cd0254a2..8ebd543a1e87e3 100644 --- 
a/programs/sbf/tests/simulation.rs +++ b/programs/sbf/tests/simulation.rs @@ -88,8 +88,8 @@ fn test_no_panic_rpc_client() { match rpc_client.send_and_confirm_transaction(&transaction) { Ok(_) => break, Err(e) => { - if !format!("{:?}", e).contains("Program is not deployed") { - panic!("Unexpected error: {:?}", e); + if !format!("{e:?}").contains("Program is not deployed") { + panic!("Unexpected error: {e:?}"); } attempt += 1; if attempt > MAX_ATTEMPTS { diff --git a/programs/sbf/tests/sysvar.rs b/programs/sbf/tests/sysvar.rs index 484ecb489fd448..6728e2599492c7 100644 --- a/programs/sbf/tests/sysvar.rs +++ b/programs/sbf/tests/sysvar.rs @@ -18,10 +18,8 @@ use { stake_history, }, solana_signer::Signer, - solana_sysvar::{ - epoch_rewards, - stake_history::{StakeHistory, StakeHistoryEntry}, - }, + solana_stake_interface::stake_history::{StakeHistory, StakeHistoryEntry}, + solana_sysvar::epoch_rewards, solana_transaction::Transaction, }; @@ -71,7 +69,13 @@ fn test_sysvar_syscalls() { &authority_keypair, "solana_sbf_rust_sysvar", ); + let dummy_account_key = Pubkey::new_unique(); + bank.store_account( + &dummy_account_key, + &solana_account::AccountSharedData::new(1, 32, &program_id), + ); bank.freeze(); + let blockhash = bank.last_blockhash(); for ix_discriminator in 0..4 { let instruction = Instruction::new_with_bincode( @@ -79,7 +83,7 @@ fn test_sysvar_syscalls() { &[ix_discriminator], vec![ AccountMeta::new(mint_keypair.pubkey(), true), - AccountMeta::new(Pubkey::new_unique(), false), + AccountMeta::new(dummy_account_key, false), AccountMeta::new_readonly(clock::id(), false), AccountMeta::new_readonly(epoch_schedule::id(), false), AccountMeta::new_readonly(instructions::id(), false), @@ -92,11 +96,27 @@ fn test_sysvar_syscalls() { AccountMeta::new_readonly(epoch_rewards::id(), false), ], ); - let blockhash = bank.last_blockhash(); let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); let transaction = Transaction::new(&[&mint_keypair], message, 
blockhash); let sanitized_tx = RuntimeTransaction::from_transaction_for_tests(transaction); let result = bank.simulate_transaction(&sanitized_tx, false); assert!(result.result.is_ok()); } + + // Storing the result of get_sysvar() in the input region is not allowed + // because of the 16 byte alignment requirement of the EpochRewards sysvar. + let instruction = Instruction::new_with_bincode( + program_id, + &[4], + vec![ + AccountMeta::new(mint_keypair.pubkey(), true), + AccountMeta::new_readonly(epoch_rewards::id(), false), + AccountMeta::new(dummy_account_key, false), + ], + ); + let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); + let transaction = Transaction::new(&[&mint_keypair], message, blockhash); + let sanitized_tx = RuntimeTransaction::from_transaction_for_tests(transaction); + let result = bank.simulate_transaction(&sanitized_tx, false); + assert!(result.result.is_err()); } diff --git a/programs/stake-tests/Cargo.toml b/programs/stake-tests/Cargo.toml deleted file mode 100644 index a37e91f80a0a7c..00000000000000 --- a/programs/stake-tests/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -# This package only exists to avoid circular dependencies during cargo publish: -# solana-program-test <--> solana-stake-program - -[package] -name = "solana-stake-program-tests" -publish = false -version = { workspace = true } -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -edition = { workspace = true } - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dev-dependencies] -agave-feature-set = { workspace = true } -assert_matches = { workspace = true } -bincode = { workspace = true } -solana-account = { workspace = true } -solana-instruction = { workspace = true } -solana-keypair = { workspace = true } -solana-program-error = { workspace = true } -solana-program-test = { workspace = true } -solana-pubkey = { workspace = true } -solana-signer = { 
workspace = true } -solana-stake-interface = { workspace = true } -solana-system-interface = { workspace = true } -solana-sysvar = { workspace = true } -solana-transaction = { workspace = true } -solana-transaction-error = { workspace = true } -solana-vote-program = { workspace = true } -test-case = { workspace = true } diff --git a/programs/stake-tests/tests/test_move_stake_and_lamports.rs b/programs/stake-tests/tests/test_move_stake_and_lamports.rs deleted file mode 100644 index 7b268ea77e7241..00000000000000 --- a/programs/stake-tests/tests/test_move_stake_and_lamports.rs +++ /dev/null @@ -1,1221 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] - -// NOTE this is temporarily ported from the bpf stake program repo so MoveStake and MoveLamports can be tested comprehensively -// in the future we will either port *all* instruction tests from bpf stake program and remove existing stakeinstruction tests -// or we will develop a text fixture system that allows fuzzing and obsoletes both existing test suites -// in other words the utility functions in this file should not be broken out into modules or used elsewhere - -use { - agave_feature_set::stake_raise_minimum_delegation_to_1_sol, - solana_account::Account as SolanaAccount, - solana_instruction::Instruction, - solana_keypair::Keypair, - solana_program_error::{ProgramError, ProgramResult}, - solana_program_test::*, - solana_pubkey::Pubkey, - solana_signer::{signers::Signers, Signer}, - solana_stake_interface::{ - self as stake, - error::StakeError, - instruction as ixn, program as stake_program, - state::{Authorized, Lockup, Meta, Stake, StakeStateV2}, - }, - solana_system_interface::{instruction as system_instruction, program as system_program}, - solana_sysvar::{clock::Clock, stake_history::StakeHistory}, - solana_transaction::Transaction, - solana_transaction_error::TransactionError, - solana_vote_program::{ - self, vote_instruction, - vote_state::{VoteInit, VoteStateV3, VoteStateVersions}, - }, - 
test_case::test_matrix, -}; - -const NO_SIGNERS: &[Keypair] = &[]; - -fn program_test() -> ProgramTest { - program_test_without_features(&[]) -} - -fn program_test_without_features(feature_ids: &[Pubkey]) -> ProgramTest { - let mut program_test = ProgramTest::default(); - for feature_id in feature_ids { - program_test.deactivate_feature(*feature_id); - } - - program_test -} - -#[derive(Debug, PartialEq)] -struct Accounts { - validator: Keypair, - voter: Keypair, - withdrawer: Keypair, - vote_account: Keypair, -} - -impl Accounts { - async fn initialize(&self, context: &mut ProgramTestContext) { - let slot = context.genesis_config().epoch_schedule.first_normal_slot + 1; - context.warp_to_slot(slot).unwrap(); - - create_vote( - context, - &self.validator, - &self.voter.pubkey(), - &self.withdrawer.pubkey(), - &self.vote_account, - ) - .await; - } -} - -impl Default for Accounts { - fn default() -> Self { - Self { - validator: Keypair::new(), - voter: Keypair::new(), - withdrawer: Keypair::new(), - vote_account: Keypair::new(), - } - } -} - -async fn create_vote( - context: &mut ProgramTestContext, - validator: &Keypair, - voter: &Pubkey, - withdrawer: &Pubkey, - vote_account: &Keypair, -) { - let rent = context.banks_client.get_rent().await.unwrap(); - let rent_voter = rent.minimum_balance(VoteStateV3::size_of()); - - let mut instructions = vec![system_instruction::create_account( - &context.payer.pubkey(), - &validator.pubkey(), - rent.minimum_balance(0), - 0, - &system_program::id(), - )]; - instructions.append(&mut vote_instruction::create_account_with_config( - &context.payer.pubkey(), - &vote_account.pubkey(), - &VoteInit { - node_pubkey: validator.pubkey(), - authorized_voter: *voter, - authorized_withdrawer: *withdrawer, - ..VoteInit::default() - }, - rent_voter, - vote_instruction::CreateVoteAccountConfig { - space: VoteStateVersions::vote_state_size_of(true) as u64, - ..Default::default() - }, - )); - - let transaction = Transaction::new_signed_with_payer( - 
&instructions, - Some(&context.payer.pubkey()), - &[validator, vote_account, &context.payer], - context.last_blockhash, - ); - - // ignore errors for idempotency - let _ = context.banks_client.process_transaction(transaction).await; -} - -async fn transfer(context: &mut ProgramTestContext, recipient: &Pubkey, amount: u64) { - let transaction = Transaction::new_signed_with_payer( - &[system_instruction::transfer( - &context.payer.pubkey(), - recipient, - amount, - )], - Some(&context.payer.pubkey()), - &[&context.payer], - context.last_blockhash, - ); - context - .banks_client - .process_transaction(transaction) - .await - .unwrap(); -} - -async fn advance_epoch(context: &mut ProgramTestContext) { - refresh_blockhash(context).await; - - let root_slot = context.banks_client.get_root_slot().await.unwrap(); - let slots_per_epoch = context.genesis_config().epoch_schedule.slots_per_epoch; - context.warp_to_slot(root_slot + slots_per_epoch).unwrap(); -} - -async fn refresh_blockhash(context: &mut ProgramTestContext) { - context.last_blockhash = context - .banks_client - .get_new_latest_blockhash(&context.last_blockhash) - .await - .unwrap(); -} - -async fn get_account(banks_client: &mut BanksClient, pubkey: &Pubkey) -> SolanaAccount { - banks_client - .get_account(*pubkey) - .await - .expect("client error") - .expect("account not found") -} - -async fn get_stake_account( - banks_client: &mut BanksClient, - pubkey: &Pubkey, -) -> (Meta, Option, u64) { - let stake_account = get_account(banks_client, pubkey).await; - let lamports = stake_account.lamports; - match bincode::deserialize::(&stake_account.data).unwrap() { - StakeStateV2::Initialized(meta) => (meta, None, lamports), - StakeStateV2::Stake(meta, stake, _) => (meta, Some(stake), lamports), - StakeStateV2::Uninitialized => panic!("panic: uninitialized"), - _ => unimplemented!(), - } -} - -async fn get_stake_account_rent(banks_client: &mut BanksClient) -> u64 { - let rent = banks_client.get_rent().await.unwrap(); - 
rent.minimum_balance(std::mem::size_of::()) -} - -async fn get_effective_stake(banks_client: &mut BanksClient, pubkey: &Pubkey) -> u64 { - let clock = banks_client.get_sysvar::().await.unwrap(); - let stake_history = banks_client.get_sysvar::().await.unwrap(); - let stake_account = get_account(banks_client, pubkey).await; - match bincode::deserialize::(&stake_account.data).unwrap() { - StakeStateV2::Stake(_, stake, _) => { - stake - .delegation - .stake_activating_and_deactivating(clock.epoch, &stake_history, Some(0)) - .effective - } - _ => 0, - } -} - -async fn get_minimum_delegation(context: &mut ProgramTestContext) -> u64 { - let transaction = Transaction::new_signed_with_payer( - &[stake::instruction::get_minimum_delegation()], - Some(&context.payer.pubkey()), - &[&context.payer], - context.last_blockhash, - ); - let mut data = context - .banks_client - .simulate_transaction(transaction) - .await - .unwrap() - .simulation_details - .unwrap() - .return_data - .unwrap() - .data; - data.resize(8, 0); - - data.try_into().map(u64::from_le_bytes).unwrap() -} - -async fn create_blank_stake_account_from_keypair( - context: &mut ProgramTestContext, - stake: &Keypair, -) -> Pubkey { - let lamports = get_stake_account_rent(&mut context.banks_client).await; - - let transaction = Transaction::new_signed_with_payer( - &[system_instruction::create_account( - &context.payer.pubkey(), - &stake.pubkey(), - lamports, - StakeStateV2::size_of() as u64, - &stake_program::id(), - )], - Some(&context.payer.pubkey()), - &[&context.payer, stake], - context.last_blockhash, - ); - - context - .banks_client - .process_transaction(transaction) - .await - .unwrap(); - - stake.pubkey() -} - -async fn process_instruction( - context: &mut ProgramTestContext, - instruction: &Instruction, - additional_signers: &T, -) -> ProgramResult { - let mut transaction = - Transaction::new_with_payer(&[instruction.clone()], Some(&context.payer.pubkey())); - - transaction.partial_sign(&[&context.payer], 
context.last_blockhash); - transaction.sign(additional_signers, context.last_blockhash); - - match context.banks_client.process_transaction(transaction).await { - Ok(_) => Ok(()), - Err(e) => { - // banks client error -> transaction error -> instruction error -> program error - match e.unwrap() { - TransactionError::InstructionError(_, e) => Err(e.try_into().unwrap()), - TransactionError::InsufficientFundsForRent { .. } => { - Err(ProgramError::InsufficientFunds) - } - _ => panic!("couldnt convert {e:?} to ProgramError"), - } - } - } -} - -async fn test_instruction_with_missing_signers( - context: &mut ProgramTestContext, - instruction: &Instruction, - additional_signers: &Vec<&Keypair>, -) { - // remove every signer one by one and ensure we always fail - for i in 0..instruction.accounts.len() { - if instruction.accounts[i].is_signer { - let mut instruction = instruction.clone(); - instruction.accounts[i].is_signer = false; - let reduced_signers: Vec<_> = additional_signers - .iter() - .filter(|s| s.pubkey() != instruction.accounts[i].pubkey) - .collect(); - - let e = process_instruction(context, &instruction, &reduced_signers) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::MissingRequiredSignature); - } - } - - // now make sure the instruction succeeds - process_instruction(context, instruction, additional_signers) - .await - .unwrap(); -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -enum StakeLifecycle { - Uninitialized = 0, - Initialized, - Activating, - Active, - Deactivating, - Deactive, -} -impl StakeLifecycle { - // (stake, staker, withdrawer) - async fn new_stake_account( - self, - context: &mut ProgramTestContext, - vote_account: &Pubkey, - staked_amount: u64, - ) -> (Keypair, Keypair, Keypair) { - let stake_keypair = Keypair::new(); - let staker_keypair = Keypair::new(); - let withdrawer_keypair = Keypair::new(); - - self.new_stake_account_fully_specified( - context, - vote_account, - staked_amount, - &stake_keypair, - 
&staker_keypair, - &withdrawer_keypair, - &Lockup::default(), - ) - .await; - - (stake_keypair, staker_keypair, withdrawer_keypair) - } - - #[allow(clippy::too_many_arguments)] - async fn new_stake_account_fully_specified( - self, - context: &mut ProgramTestContext, - vote_account: &Pubkey, - staked_amount: u64, - stake_keypair: &Keypair, - staker_keypair: &Keypair, - withdrawer_keypair: &Keypair, - lockup: &Lockup, - ) { - let authorized = Authorized { - staker: staker_keypair.pubkey(), - withdrawer: withdrawer_keypair.pubkey(), - }; - - let stake = create_blank_stake_account_from_keypair(context, stake_keypair).await; - if staked_amount > 0 { - transfer(context, &stake, staked_amount).await; - } - - if self >= StakeLifecycle::Initialized { - let instruction = ixn::initialize(&stake, &authorized, lockup); - process_instruction(context, &instruction, NO_SIGNERS) - .await - .unwrap(); - } - - if self >= StakeLifecycle::Activating { - let instruction = ixn::delegate_stake(&stake, &staker_keypair.pubkey(), vote_account); - process_instruction(context, &instruction, &vec![staker_keypair]) - .await - .unwrap(); - } - - if self >= StakeLifecycle::Active { - advance_epoch(context).await; - assert_eq!( - get_effective_stake(&mut context.banks_client, &stake).await, - staked_amount, - ); - } - - if self >= StakeLifecycle::Deactivating { - let instruction = ixn::deactivate_stake(&stake, &staker_keypair.pubkey()); - process_instruction(context, &instruction, &vec![staker_keypair]) - .await - .unwrap(); - } - - if self == StakeLifecycle::Deactive { - advance_epoch(context).await; - assert_eq!( - get_effective_stake(&mut context.banks_client, &stake).await, - 0, - ); - } - } -} - -#[test_matrix( - [program_test(), program_test_without_features(&[stake_raise_minimum_delegation_to_1_sol::id()])], - [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, - StakeLifecycle::Deactivating, StakeLifecycle::Deactive], - [StakeLifecycle::Initialized, 
StakeLifecycle::Activating, StakeLifecycle::Active, - StakeLifecycle::Deactivating, StakeLifecycle::Deactive], - [false, true], - [false, true] -)] -#[tokio::test] -async fn test_move_stake( - program_test: ProgramTest, - move_source_type: StakeLifecycle, - move_dest_type: StakeLifecycle, - full_move: bool, - has_lockup: bool, -) { - let mut context = program_test.start_with_context().await; - let accounts = Accounts::default(); - accounts.initialize(&mut context).await; - - let rent_exempt_reserve = get_stake_account_rent(&mut context.banks_client).await; - let minimum_delegation = get_minimum_delegation(&mut context).await; - - // source has 2x minimum so we can easily test an unfunded destination - let source_staked_amount = minimum_delegation * 2; - - // this is the amount of *staked* lamports for test checks - // destinations may have excess lamports but these are *never* activated by move - let dest_staked_amount = if move_dest_type == StakeLifecycle::Active { - minimum_delegation - } else { - 0 - }; - - // test with and without lockup. 
both of these cases pass, we test failures elsewhere - let lockup = if has_lockup { - let clock = context.banks_client.get_sysvar::().await.unwrap(); - let lockup = Lockup { - unix_timestamp: 0, - epoch: clock.epoch + 100, - custodian: Pubkey::new_unique(), - }; - - assert!(lockup.is_in_force(&clock, None)); - lockup - } else { - Lockup::default() - }; - - // we put an extra minimum in every account, unstaked, to test that no new lamports activate - // name them here so our asserts are readable - let source_excess = minimum_delegation; - let dest_excess = minimum_delegation; - - let move_source_keypair = Keypair::new(); - let move_dest_keypair = Keypair::new(); - let staker_keypair = Keypair::new(); - let withdrawer_keypair = Keypair::new(); - - // create source stake - move_source_type - .new_stake_account_fully_specified( - &mut context, - &accounts.vote_account.pubkey(), - source_staked_amount, - &move_source_keypair, - &staker_keypair, - &withdrawer_keypair, - &lockup, - ) - .await; - let move_source = move_source_keypair.pubkey(); - let mut source_account = get_account(&mut context.banks_client, &move_source).await; - let mut source_stake_state: StakeStateV2 = bincode::deserialize(&source_account.data).unwrap(); - - // create dest stake with same authorities - move_dest_type - .new_stake_account_fully_specified( - &mut context, - &accounts.vote_account.pubkey(), - minimum_delegation, - &move_dest_keypair, - &staker_keypair, - &withdrawer_keypair, - &lockup, - ) - .await; - let move_dest = move_dest_keypair.pubkey(); - - // true up source epoch if transient - if move_source_type == StakeLifecycle::Activating - || move_source_type == StakeLifecycle::Deactivating - { - let clock = context.banks_client.get_sysvar::().await.unwrap(); - if let StakeStateV2::Stake(_, ref mut stake, _) = &mut source_stake_state { - match move_source_type { - StakeLifecycle::Activating => stake.delegation.activation_epoch = clock.epoch, - StakeLifecycle::Deactivating => 
stake.delegation.deactivation_epoch = clock.epoch, - _ => (), - } - } - - source_account.data = bincode::serialize(&source_stake_state).unwrap(); - context.set_account(&move_source, &source_account.into()); - } - - // our inactive accounts have extra lamports, lets not let active feel left out - if move_dest_type == StakeLifecycle::Active { - transfer(&mut context, &move_dest, dest_excess).await; - } - - // hey why not spread the love around to everyone - transfer(&mut context, &move_source, source_excess).await; - - // alright first things first, clear out all the state failures - match (move_source_type, move_dest_type) { - // valid - (StakeLifecycle::Active, StakeLifecycle::Initialized) - | (StakeLifecycle::Active, StakeLifecycle::Active) - | (StakeLifecycle::Active, StakeLifecycle::Deactive) => (), - // invalid! get outta my test - _ => { - let instruction = ixn::move_stake( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - if full_move { - source_staked_amount - } else { - minimum_delegation - }, - ); - - // this is InvalidAccountData sometimes and Custom(5) sometimes but i dont care - process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - return; - } - } - - // the below checks are conceptually incoherent with a 1 lamport minimum - // the undershoot fails successfully (but because its a zero move, not because the destination ends underfunded) - // then the second one succeeds failedly (because its a full move, so the "underfunded" source is actually closed) - if minimum_delegation > 1 { - // source has 2x minimum (always 2 sol because these tests dont have featuresets) - // so first for inactive accounts lets undershoot and fail for underfunded dest - if move_dest_type != StakeLifecycle::Active { - let instruction = ixn::move_stake( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - minimum_delegation - 1, - ); - - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - 
.await - .unwrap_err(); - assert_eq!(e, ProgramError::InvalidArgument); - } - - // now lets overshoot and fail for underfunded source - let instruction = ixn::move_stake( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - minimum_delegation + 1, - ); - - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::InvalidArgument); - } - - // now we do it juuust right - let instruction = ixn::move_stake( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - if full_move { - source_staked_amount - } else { - minimum_delegation - }, - ); - - test_instruction_with_missing_signers(&mut context, &instruction, &vec![&staker_keypair]).await; - - if full_move { - let (_, option_source_stake, source_lamports) = - get_stake_account(&mut context.banks_client, &move_source).await; - - // source is deactivated and rent/excess stay behind - assert!(option_source_stake.is_none()); - assert_eq!(source_lamports, source_excess + rent_exempt_reserve); - - let (_, Some(dest_stake), dest_lamports) = - get_stake_account(&mut context.banks_client, &move_dest).await - else { - panic!("dest should be active") - }; - let dest_effective_stake = get_effective_stake(&mut context.banks_client, &move_dest).await; - - // dest captured the entire source delegation, kept its rent/excess, didnt activate its excess - assert_eq!( - dest_stake.delegation.stake, - source_staked_amount + dest_staked_amount - ); - assert_eq!(dest_effective_stake, dest_stake.delegation.stake); - assert_eq!( - dest_lamports, - dest_effective_stake + dest_excess + rent_exempt_reserve - ); - } else { - let (_, Some(source_stake), source_lamports) = - get_stake_account(&mut context.banks_client, &move_source).await - else { - panic!("source should be active") - }; - let source_effective_stake = - get_effective_stake(&mut context.banks_client, &move_source).await; - - // half of source delegation moved over, excess stayed behind - 
assert_eq!(source_stake.delegation.stake, source_staked_amount / 2); - assert_eq!(source_effective_stake, source_stake.delegation.stake); - assert_eq!( - source_lamports, - source_effective_stake + source_excess + rent_exempt_reserve - ); - - let (_, Some(dest_stake), dest_lamports) = - get_stake_account(&mut context.banks_client, &move_dest).await - else { - panic!("dest should be active") - }; - let dest_effective_stake = get_effective_stake(&mut context.banks_client, &move_dest).await; - - // dest mirrors our observations - assert_eq!( - dest_stake.delegation.stake, - source_staked_amount / 2 + dest_staked_amount - ); - assert_eq!(dest_effective_stake, dest_stake.delegation.stake); - assert_eq!( - dest_lamports, - dest_effective_stake + dest_excess + rent_exempt_reserve - ); - } -} - -#[test_matrix( - [program_test(), program_test_without_features(&[stake_raise_minimum_delegation_to_1_sol::id()])], - [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, - StakeLifecycle::Deactivating, StakeLifecycle::Deactive], - [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, - StakeLifecycle::Deactivating, StakeLifecycle::Deactive], - [false, true], - [false, true] -)] -#[tokio::test] -async fn test_move_lamports( - program_test: ProgramTest, - move_source_type: StakeLifecycle, - move_dest_type: StakeLifecycle, - different_votes: bool, - has_lockup: bool, -) { - let mut context = program_test.start_with_context().await; - let accounts = Accounts::default(); - accounts.initialize(&mut context).await; - - let rent_exempt_reserve = get_stake_account_rent(&mut context.banks_client).await; - let minimum_delegation = get_minimum_delegation(&mut context).await; - - // put minimum in both accounts if theyre active - let source_staked_amount = if move_source_type == StakeLifecycle::Active { - minimum_delegation - } else { - 0 - }; - - let dest_staked_amount = if move_dest_type == StakeLifecycle::Active { - minimum_delegation 
- } else { - 0 - }; - - // test with and without lockup. both of these cases pass, we test failures elsewhere - let lockup = if has_lockup { - let clock = context.banks_client.get_sysvar::().await.unwrap(); - let lockup = Lockup { - unix_timestamp: 0, - epoch: clock.epoch + 100, - custodian: Pubkey::new_unique(), - }; - - assert!(lockup.is_in_force(&clock, None)); - lockup - } else { - Lockup::default() - }; - - // we put an extra minimum in every account, unstaked, to test moving them - let source_excess = minimum_delegation; - let dest_excess = minimum_delegation; - - let move_source_keypair = Keypair::new(); - let move_dest_keypair = Keypair::new(); - let staker_keypair = Keypair::new(); - let withdrawer_keypair = Keypair::new(); - - // make a separate vote account if needed - let dest_vote_account = if different_votes { - let vote_account = Keypair::new(); - create_vote( - &mut context, - &Keypair::new(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &vote_account, - ) - .await; - - vote_account.pubkey() - } else { - accounts.vote_account.pubkey() - }; - - // create source stake - move_source_type - .new_stake_account_fully_specified( - &mut context, - &accounts.vote_account.pubkey(), - minimum_delegation, - &move_source_keypair, - &staker_keypair, - &withdrawer_keypair, - &lockup, - ) - .await; - let move_source = move_source_keypair.pubkey(); - let mut source_account = get_account(&mut context.banks_client, &move_source).await; - let mut source_stake_state: StakeStateV2 = bincode::deserialize(&source_account.data).unwrap(); - - // create dest stake with same authorities - move_dest_type - .new_stake_account_fully_specified( - &mut context, - &dest_vote_account, - minimum_delegation, - &move_dest_keypair, - &staker_keypair, - &withdrawer_keypair, - &lockup, - ) - .await; - let move_dest = move_dest_keypair.pubkey(); - - // true up source epoch if transient - if move_source_type == StakeLifecycle::Activating - || move_source_type == 
StakeLifecycle::Deactivating - { - let clock = context.banks_client.get_sysvar::().await.unwrap(); - if let StakeStateV2::Stake(_, ref mut stake, _) = &mut source_stake_state { - match move_source_type { - StakeLifecycle::Activating => stake.delegation.activation_epoch = clock.epoch, - StakeLifecycle::Deactivating => stake.delegation.deactivation_epoch = clock.epoch, - _ => (), - } - } - - source_account.data = bincode::serialize(&source_stake_state).unwrap(); - context.set_account(&move_source, &source_account.into()); - } - - // if we activated the initial amount we need to top up with the test lamports - if move_source_type == StakeLifecycle::Active { - transfer(&mut context, &move_source, source_excess).await; - } - if move_dest_type == StakeLifecycle::Active { - transfer(&mut context, &move_dest, dest_excess).await; - } - - // clear out state failures - if move_source_type == StakeLifecycle::Activating - || move_source_type == StakeLifecycle::Deactivating - || move_dest_type == StakeLifecycle::Deactivating - { - let instruction = ixn::move_lamports( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - source_excess, - ); - - process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - return; - } - - // overshoot and fail for underfunded source - let instruction = ixn::move_lamports( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - source_excess + 1, - ); - - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::InvalidArgument); - - let (_, _, before_source_lamports) = - get_stake_account(&mut context.banks_client, &move_source).await; - let (_, _, before_dest_lamports) = - get_stake_account(&mut context.banks_client, &move_dest).await; - - // now properly move the full excess - let instruction = ixn::move_lamports( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - source_excess, - ); - - 
test_instruction_with_missing_signers(&mut context, &instruction, &vec![&staker_keypair]).await; - - let (_, _, after_source_lamports) = - get_stake_account(&mut context.banks_client, &move_source).await; - let source_effective_stake = get_effective_stake(&mut context.banks_client, &move_source).await; - - // source activation didnt change - assert_eq!(source_effective_stake, source_staked_amount); - - // source lamports are right - assert_eq!( - after_source_lamports, - before_source_lamports - minimum_delegation - ); - assert_eq!( - after_source_lamports, - source_effective_stake + rent_exempt_reserve - ); - - let (_, _, after_dest_lamports) = - get_stake_account(&mut context.banks_client, &move_dest).await; - let dest_effective_stake = get_effective_stake(&mut context.banks_client, &move_dest).await; - - // dest activation didnt change - assert_eq!(dest_effective_stake, dest_staked_amount); - - // dest lamports are right - assert_eq!( - after_dest_lamports, - before_dest_lamports + minimum_delegation - ); - assert_eq!( - after_dest_lamports, - dest_effective_stake + rent_exempt_reserve + source_excess + dest_excess - ); -} - -#[test_matrix( - [program_test(), program_test_without_features(&[stake_raise_minimum_delegation_to_1_sol::id()])], - [(StakeLifecycle::Active, StakeLifecycle::Uninitialized), - (StakeLifecycle::Uninitialized, StakeLifecycle::Initialized), - (StakeLifecycle::Uninitialized, StakeLifecycle::Uninitialized)], - [false, true] -)] -#[tokio::test] -async fn test_move_uninitialized_fail( - program_test: ProgramTest, - move_types: (StakeLifecycle, StakeLifecycle), - move_lamports: bool, -) { - let mut context = program_test.start_with_context().await; - let accounts = Accounts::default(); - accounts.initialize(&mut context).await; - - let minimum_delegation = get_minimum_delegation(&mut context).await; - let source_staked_amount = minimum_delegation * 2; - - let (move_source_type, move_dest_type) = move_types; - - let (move_source_keypair, 
staker_keypair, withdrawer_keypair) = move_source_type - .new_stake_account( - &mut context, - &accounts.vote_account.pubkey(), - source_staked_amount, - ) - .await; - let move_source = move_source_keypair.pubkey(); - - let move_dest_keypair = Keypair::new(); - move_dest_type - .new_stake_account_fully_specified( - &mut context, - &accounts.vote_account.pubkey(), - 0, - &move_dest_keypair, - &staker_keypair, - &withdrawer_keypair, - &Lockup::default(), - ) - .await; - let move_dest = move_dest_keypair.pubkey(); - - let source_signer = if move_source_type == StakeLifecycle::Uninitialized { - &move_source_keypair - } else { - &staker_keypair - }; - - let instruction = if move_lamports { - ixn::move_lamports( - &move_source, - &move_dest, - &source_signer.pubkey(), - minimum_delegation, - ) - } else { - ixn::move_stake( - &move_source, - &move_dest, - &source_signer.pubkey(), - minimum_delegation, - ) - }; - - let e = process_instruction(&mut context, &instruction, &vec![source_signer]) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::InvalidAccountData); -} - -#[test_matrix( - [program_test(), program_test_without_features(&[stake_raise_minimum_delegation_to_1_sol::id()])], - [StakeLifecycle::Initialized, StakeLifecycle::Active, StakeLifecycle::Deactive], - [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, StakeLifecycle::Deactive], - [false, true] -)] -#[tokio::test] -async fn test_move_general_fail( - program_test: ProgramTest, - move_source_type: StakeLifecycle, - move_dest_type: StakeLifecycle, - move_lamports: bool, -) { - // the test_matrix includes all valid source/dest combinations for MoveLamports - // we dont test invalid combinations because they would fail regardless of the fail cases we test here - // valid source/dest for MoveStake are a strict subset of MoveLamports - // source must be active, and dest must be active or inactive. 
so we skip the additional invalid MoveStake cases - if !move_lamports - && (move_source_type != StakeLifecycle::Active - || move_dest_type == StakeLifecycle::Activating) - { - return; - } - - let mut context = program_test.start_with_context().await; - let accounts = Accounts::default(); - accounts.initialize(&mut context).await; - - let minimum_delegation = get_minimum_delegation(&mut context).await; - let source_staked_amount = minimum_delegation * 2; - - let in_force_lockup = { - let clock = context.banks_client.get_sysvar::().await.unwrap(); - Lockup { - unix_timestamp: 0, - epoch: clock.epoch + 1_000_000, - custodian: Pubkey::new_unique(), - } - }; - - let mk_ixn = if move_lamports { - ixn::move_lamports - } else { - ixn::move_stake - }; - - // we can reuse source but will need a lot of dest - let (move_source_keypair, staker_keypair, withdrawer_keypair) = move_source_type - .new_stake_account( - &mut context, - &accounts.vote_account.pubkey(), - source_staked_amount, - ) - .await; - let move_source = move_source_keypair.pubkey(); - transfer(&mut context, &move_source, minimum_delegation).await; - - // self-move fails - // NOTE this error type is an artifact of the native program interface - // when we move to bpf, it should actually hit the processor error - let instruction = mk_ixn( - &move_source, - &move_source, - &staker_keypair.pubkey(), - minimum_delegation, - ); - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::AccountBorrowFailed); - - // first we make a "normal" move dest - { - let move_dest_keypair = Keypair::new(); - move_dest_type - .new_stake_account_fully_specified( - &mut context, - &accounts.vote_account.pubkey(), - minimum_delegation, - &move_dest_keypair, - &staker_keypair, - &withdrawer_keypair, - &Lockup::default(), - ) - .await; - let move_dest = move_dest_keypair.pubkey(); - - // zero move fails - let instruction = mk_ixn(&move_source, &move_dest, 
&staker_keypair.pubkey(), 0); - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::InvalidArgument); - - // sign with withdrawer fails - let instruction = mk_ixn( - &move_source, - &move_dest, - &withdrawer_keypair.pubkey(), - minimum_delegation, - ); - let e = process_instruction(&mut context, &instruction, &vec![&withdrawer_keypair]) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::MissingRequiredSignature); - - // good place to test source lockup - let move_locked_source_keypair = Keypair::new(); - move_source_type - .new_stake_account_fully_specified( - &mut context, - &accounts.vote_account.pubkey(), - source_staked_amount, - &move_locked_source_keypair, - &staker_keypair, - &withdrawer_keypair, - &in_force_lockup, - ) - .await; - let move_locked_source = move_locked_source_keypair.pubkey(); - transfer(&mut context, &move_locked_source, minimum_delegation).await; - - let instruction = mk_ixn( - &move_locked_source, - &move_dest, - &staker_keypair.pubkey(), - minimum_delegation, - ); - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, StakeError::MergeMismatch.into()); - } - - // staker mismatch - { - let move_dest_keypair = Keypair::new(); - let throwaway = Keypair::new(); - move_dest_type - .new_stake_account_fully_specified( - &mut context, - &accounts.vote_account.pubkey(), - minimum_delegation, - &move_dest_keypair, - &throwaway, - &withdrawer_keypair, - &Lockup::default(), - ) - .await; - let move_dest = move_dest_keypair.pubkey(); - - let instruction = mk_ixn( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - minimum_delegation, - ); - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, StakeError::MergeMismatch.into()); - - let instruction = mk_ixn( - &move_source, - &move_dest, - &throwaway.pubkey(), - 
minimum_delegation, - ); - let e = process_instruction(&mut context, &instruction, &vec![&throwaway]) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::MissingRequiredSignature); - } - - // withdrawer mismatch - { - let move_dest_keypair = Keypair::new(); - let throwaway = Keypair::new(); - move_dest_type - .new_stake_account_fully_specified( - &mut context, - &accounts.vote_account.pubkey(), - minimum_delegation, - &move_dest_keypair, - &staker_keypair, - &throwaway, - &Lockup::default(), - ) - .await; - let move_dest = move_dest_keypair.pubkey(); - - let instruction = mk_ixn( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - minimum_delegation, - ); - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, StakeError::MergeMismatch.into()); - - let instruction = mk_ixn( - &move_source, - &move_dest, - &throwaway.pubkey(), - minimum_delegation, - ); - let e = process_instruction(&mut context, &instruction, &vec![&throwaway]) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::MissingRequiredSignature); - } - - // dest lockup - { - let move_dest_keypair = Keypair::new(); - move_dest_type - .new_stake_account_fully_specified( - &mut context, - &accounts.vote_account.pubkey(), - minimum_delegation, - &move_dest_keypair, - &staker_keypair, - &withdrawer_keypair, - &in_force_lockup, - ) - .await; - let move_dest = move_dest_keypair.pubkey(); - - let instruction = mk_ixn( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - minimum_delegation, - ); - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, StakeError::MergeMismatch.into()); - } - - // lastly we test different vote accounts for move_stake - if !move_lamports && move_dest_type == StakeLifecycle::Active { - let dest_vote_account_keypair = Keypair::new(); - create_vote( - &mut context, - &Keypair::new(), - &Pubkey::new_unique(), - 
&Pubkey::new_unique(), - &dest_vote_account_keypair, - ) - .await; - - let move_dest_keypair = Keypair::new(); - move_dest_type - .new_stake_account_fully_specified( - &mut context, - &dest_vote_account_keypair.pubkey(), - minimum_delegation, - &move_dest_keypair, - &staker_keypair, - &withdrawer_keypair, - &Lockup::default(), - ) - .await; - let move_dest = move_dest_keypair.pubkey(); - - let instruction = mk_ixn( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - minimum_delegation, - ); - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, StakeError::VoteAddressMismatch.into()); - } -} diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index 0fdbff3e923b68..7f2573e16c1042 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-stake-program" -description = "Solana Stake program" +description = "Solana Stake program helpers" documentation = "https://docs.rs/solana-stake-program" version = { workspace = true } authors = { workspace = true } @@ -21,42 +21,17 @@ agave-feature-set = { workspace = true } bincode = { workspace = true } log = { workspace = true } solana-account = { workspace = true } -solana-bincode = { workspace = true } solana-clock = { workspace = true } -solana-config-program-client = { workspace = true, features = ["serde"] } +solana-config-interface = { workspace = true, features = ["bincode"] } solana-genesis-config = { workspace = true } -solana-instruction = { workspace = true } -solana-log-collector = { workspace = true } solana-native-token = { workspace = true } -solana-packet = { workspace = true } -solana-program-runtime = { workspace = true } -solana-pubkey = { workspace = true, features = ["sha2"] } +solana-pubkey = { workspace = true } solana-rent = { workspace = true } solana-sdk-ids = { workspace = true } -solana-stake-interface = { workspace = true } +solana-stake-interface = { 
workspace = true, features = ["bincode"] } solana-sysvar = { workspace = true } solana-transaction-context = { workspace = true, features = ["bincode"] } -solana-type-overrides = { workspace = true } solana-vote-interface = { workspace = true, features = ["bincode"] } -[dev-dependencies] -assert_matches = { workspace = true } -criterion = { workspace = true } -proptest = { workspace = true } -solana-compute-budget = { workspace = true } -solana-epoch-rewards = { workspace = true } -solana-epoch-schedule = { workspace = true } -solana-program-runtime = { workspace = true, features = ["dev-context-only-utils"] } -solana-pubkey = { workspace = true, features = ["rand"] } -solana-svm-callback = { workspace = true } -solana-svm-feature-set = { workspace = true } -solana-sysvar-id = { workspace = true } -solana-vote-program = { workspace = true, default-features = false } -test-case = { workspace = true } - -[[bench]] -name = "stake" -harness = false - [lints] workspace = true diff --git a/programs/stake/benches/stake.rs b/programs/stake/benches/stake.rs deleted file mode 100644 index ffee5d581d777d..00000000000000 --- a/programs/stake/benches/stake.rs +++ /dev/null @@ -1,752 +0,0 @@ -use { - agave_feature_set::FeatureSet, - bincode::serialize, - criterion::{black_box, criterion_group, criterion_main, Criterion}, - solana_account::{create_account_shared_data_for_test, AccountSharedData, WritableAccount}, - solana_clock::{Clock, Epoch}, - solana_instruction::AccountMeta, - solana_program_runtime::invoke_context::mock_process_instruction_with_feature_set, - solana_pubkey::Pubkey, - solana_rent::Rent, - solana_sdk_ids::sysvar::{clock, rent, stake_history}, - solana_stake_interface::{ - instruction::{ - self, AuthorizeCheckedWithSeedArgs, AuthorizeWithSeedArgs, LockupArgs, - LockupCheckedArgs, StakeInstruction, - }, - stake_flags::StakeFlags, - state::{Authorized, Lockup, StakeAuthorize, StakeStateV2}, - }, - solana_stake_program::{ - stake_instruction, - 
stake_state::{Delegation, Meta, Stake}, - }, - solana_sysvar::stake_history::StakeHistory, - solana_vote_interface::state::{VoteState, VoteStateVersions}, - solana_vote_program::vote_state, - std::sync::Arc, -}; - -const ACCOUNT_BALANCE: u64 = u64::MAX / 4; // enough lamports for tests - -struct TestSetup { - feature_set: Arc, - stake_address: Pubkey, - stake_account: AccountSharedData, - transaction_accounts: Vec<(Pubkey, AccountSharedData)>, - instruction_accounts: Vec, -} - -impl TestSetup { - fn new() -> Self { - let stake_account = AccountSharedData::new( - ACCOUNT_BALANCE, - StakeStateV2::size_of(), - &solana_stake_program::id(), - ); - let stake_address = solana_pubkey::Pubkey::new_unique(); - Self { - // some stake instructions are behind feature gate, enable all - // feature gates to bench all instructions - feature_set: Arc::new(FeatureSet::all_enabled()), - stake_address, - stake_account: stake_account.clone(), - transaction_accounts: vec![(stake_address, stake_account)], - instruction_accounts: vec![AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }], - } - } - - fn add_account(&mut self, id: Pubkey, account: AccountSharedData) { - self.transaction_accounts.push((id, account)); - self.instruction_accounts.push(AccountMeta { - pubkey: id, - is_signer: false, - is_writable: true, - }); - } - - fn add_account_signer(&mut self, id: Pubkey, account: AccountSharedData) { - self.transaction_accounts.push((id, account)); - self.instruction_accounts.push(AccountMeta { - pubkey: id, - is_signer: true, - is_writable: true, - }); - } - - fn initialize_stake_account(&mut self) { - let initialized_stake_account = AccountSharedData::new_data_with_space( - ACCOUNT_BALANCE, - &StakeStateV2::Initialized(Meta::auto(&self.stake_address)), - StakeStateV2::size_of(), - &solana_stake_program::id(), - ) - .unwrap(); - - self.stake_account = initialized_stake_account.clone(); - self.transaction_accounts[0] = (self.stake_address, 
initialized_stake_account); - // also make stake address a signer - self.instruction_accounts[0] = AccountMeta { - pubkey: self.stake_address, - is_signer: true, - is_writable: true, - }; - } - - fn initialize_stake_account_with_seed(&mut self, seed: &str, authorized_owner: &Pubkey) { - self.stake_address = - Pubkey::create_with_seed(authorized_owner, seed, authorized_owner).unwrap(); - self.initialize_stake_account(); - } - - // config withdraw authority, returns authorized withdrwer's pubkey - fn config_withdraw_authority(&mut self) -> Pubkey { - let withdraw_authority_address = Pubkey::new_unique(); - - let instruction = instruction::authorize( - &self.stake_address, - &self.stake_address, - &withdraw_authority_address, - StakeAuthorize::Withdrawer, - None, - ); - - let transaction_accounts = vec![ - (self.stake_address, self.stake_account.clone()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - (withdraw_authority_address, AccountSharedData::default()), - ]; - - let accounts = mock_process_instruction_with_feature_set( - &solana_stake_program::id(), - Vec::new(), - &instruction.data, - transaction_accounts, - instruction.accounts.clone(), - Ok(()), - stake_instruction::Entrypoint::vm, - |_invoke_context| {}, - |_invoke_context| {}, - &self.feature_set.runtime_features(), - ); - // update stake account - self.transaction_accounts[0] = (self.stake_address, accounts[0].clone()); - - withdraw_authority_address - } - - fn delegate_stake(&mut self) { - let vote_address = Pubkey::new_unique(); - - let instruction = - instruction::delegate_stake(&self.stake_address, &self.stake_address, &vote_address); - - let transaction_accounts = vec![ - (self.stake_address, self.stake_account.clone()), - ( - vote_address, - vote_state::create_account(&vote_address, &Pubkey::new_unique(), 0, 100), - ), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - 
create_account_shared_data_for_test(&StakeHistory::default()), - ), - ]; - - let accounts = mock_process_instruction_with_feature_set( - &solana_stake_program::id(), - Vec::new(), - &instruction.data, - transaction_accounts, - instruction.accounts.clone(), - Ok(()), - stake_instruction::Entrypoint::vm, - |_invoke_context| {}, - |_invoke_context| {}, - &self.feature_set.runtime_features(), - ); - self.stake_account = accounts[0].clone(); - self.stake_account.set_lamports(ACCOUNT_BALANCE * 2); - self.transaction_accounts[0] = (self.stake_address, self.stake_account.clone()); - } - - fn run(&self, instruction_data: &[u8]) { - mock_process_instruction_with_feature_set( - &solana_stake_program::id(), - Vec::new(), - instruction_data, - self.transaction_accounts.clone(), - self.instruction_accounts.clone(), - Ok(()), //expected_result, - stake_instruction::Entrypoint::vm, - |_invoke_context| {}, - |_invoke_context| {}, - &self.feature_set.runtime_features(), - ); - } -} - -fn bench_initialize(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.add_account( - solana_sdk_ids::sysvar::rent::id(), - create_account_shared_data_for_test(&Rent::default()), - ); - - let instruction_data = serialize(&StakeInstruction::Initialize( - Authorized::auto(&test_setup.stake_address), - Lockup::default(), - )) - .unwrap(); - c.bench_function("initialize", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_initialize_checked(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.add_account( - solana_sdk_ids::sysvar::rent::id(), - create_account_shared_data_for_test(&Rent::default()), - ); - // add staker account - test_setup.add_account(Pubkey::new_unique(), AccountSharedData::default()); - // add withdrawer account - test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); - - let instruction_data = serialize(&StakeInstruction::InitializeChecked).unwrap(); - - 
c.bench_function("initialize_checked", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_authorize_staker(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - let authority_address = Pubkey::new_unique(); - test_setup.add_account(authority_address, AccountSharedData::default()); - - let instruction_data = serialize(&StakeInstruction::Authorize( - authority_address, - StakeAuthorize::Staker, - )) - .unwrap(); - - c.bench_function("authorize_staker", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_authorize_withdrawer(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - // add authority address - let authority_address = Pubkey::new_unique(); - test_setup.add_account(authority_address, AccountSharedData::default()); - - let instruction_data = serialize(&StakeInstruction::Authorize( - authority_address, - StakeAuthorize::Withdrawer, - )) - .unwrap(); - - c.bench_function("authorize_withdrawer", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_authorize_staker_with_seed(c: &mut Criterion) { - let seed = "test test"; - let authorize_address = Pubkey::new_unique(); - - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account_with_seed(seed, &authorize_address); - test_setup.add_account_signer(authorize_address, AccountSharedData::default()); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - - let instruction_data = serialize(&StakeInstruction::AuthorizeWithSeed( - AuthorizeWithSeedArgs { - new_authorized_pubkey: Pubkey::new_unique(), - 
stake_authorize: StakeAuthorize::Staker, - authority_seed: seed.to_string(), - authority_owner: authorize_address, - }, - )) - .unwrap(); - - c.bench_function("authorize_staker_with_seed", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_authorize_withdrawer_with_seed(c: &mut Criterion) { - let seed = "test test"; - let authorize_address = Pubkey::new_unique(); - - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account_with_seed(seed, &authorize_address); - test_setup.add_account_signer(authorize_address, AccountSharedData::default()); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - - let instruction_data = serialize(&StakeInstruction::AuthorizeWithSeed( - AuthorizeWithSeedArgs { - new_authorized_pubkey: Pubkey::new_unique(), - stake_authorize: StakeAuthorize::Withdrawer, - authority_seed: seed.to_string(), - authority_owner: authorize_address, - }, - )) - .unwrap(); - - c.bench_function("authorize_withdrawer_with_seed", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_authorize_staker_checked(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - // add authorized address as signer - test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); - // add staker account as signer - test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); - - let instruction_data = - serialize(&StakeInstruction::AuthorizeChecked(StakeAuthorize::Staker)).unwrap(); - - c.bench_function("authorize_staker_checked", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_authorize_withdrawer_checked(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - 
test_setup.initialize_stake_account(); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - // add authorized address as signer - test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); - // add staker account as signer - test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); - - let instruction_data = serialize(&StakeInstruction::AuthorizeChecked( - StakeAuthorize::Withdrawer, - )) - .unwrap(); - - c.bench_function("authorize_withdrawer_checked", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_authorize_staker_checked_with_seed(c: &mut Criterion) { - let seed = "test test"; - let authorize_address = Pubkey::new_unique(); - - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account_with_seed(seed, &authorize_address); - // add authorized address as signer - test_setup.add_account_signer(authorize_address, AccountSharedData::default()); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - // add new authorize account as signer - test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); - - let instruction_data = serialize(&StakeInstruction::AuthorizeCheckedWithSeed( - AuthorizeCheckedWithSeedArgs { - stake_authorize: StakeAuthorize::Staker, - authority_seed: seed.to_string(), - authority_owner: authorize_address, - }, - )) - .unwrap(); - - c.bench_function("authorize_staker_checked_with_seed", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_authorize_withdrawer_checked_with_seed(c: &mut Criterion) { - let seed = "test test"; - let authorize_address = Pubkey::new_unique(); - - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account_with_seed(seed, &authorize_address); - // add authorized address as signer - 
test_setup.add_account_signer(authorize_address, AccountSharedData::default()); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - // add new authorize account as signer - test_setup.add_account_signer(Pubkey::new_unique(), AccountSharedData::default()); - - let instruction_data = serialize(&StakeInstruction::AuthorizeCheckedWithSeed( - AuthorizeCheckedWithSeedArgs { - stake_authorize: StakeAuthorize::Withdrawer, - authority_seed: seed.to_string(), - authority_owner: authorize_address, - }, - )) - .unwrap(); - - c.bench_function("authorize_withdrawer_checked_with_seed", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_set_lockup(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - - let instruction_data = serialize(&StakeInstruction::SetLockup(LockupArgs { - unix_timestamp: None, - epoch: Some(1), - custodian: None, - })) - .unwrap(); - - c.bench_function("set_lockup", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_set_lockup_checked(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - - let instruction_data = serialize(&StakeInstruction::SetLockupChecked(LockupCheckedArgs { - unix_timestamp: None, - epoch: Some(1), - })) - .unwrap(); - - c.bench_function("set_lockup_checked", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_withdraw(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - let withdraw_authority_address = test_setup.config_withdraw_authority(); - - // withdraw to pubkey - 
test_setup.add_account(Pubkey::new_unique(), AccountSharedData::default()); - // clock - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - // stake history - test_setup.add_account( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ); - // withdrawer pubkey - test_setup.add_account_signer(withdraw_authority_address, AccountSharedData::default()); - - let instruction_data = serialize(&StakeInstruction::Withdraw(1)).unwrap(); - - c.bench_function("withdraw", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_delegate_stake(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - - let vote_address = Pubkey::new_unique(); - let vote_account = vote_state::create_account(&vote_address, &Pubkey::new_unique(), 0, 100); - test_setup.add_account(vote_address, vote_account); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - test_setup.add_account( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ); - // dummy config account to pass check - test_setup.add_account(Pubkey::new_unique(), AccountSharedData::default()); - let instruction_data = serialize(&StakeInstruction::DelegateStake).unwrap(); - - c.bench_function("delegate_stake", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_deactivate(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - test_setup.delegate_stake(); - - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - - let instruction_data = serialize(&StakeInstruction::Deactivate).unwrap(); - - c.bench_function("deactivate", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_split(c: &mut 
Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - - let split_to_address = Pubkey::new_unique(); - let split_to_account = AccountSharedData::new_data_with_space( - 0, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &solana_stake_program::id(), - ) - .unwrap(); - - test_setup.add_account(split_to_address, split_to_account); - test_setup.add_account( - rent::id(), - create_account_shared_data_for_test(&Rent { - lamports_per_byte_year: 0, - ..Rent::default() - }), - ); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - test_setup.add_account( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ); - - let instruction_data = serialize(&StakeInstruction::Split(1)).unwrap(); - - c.bench_function("split", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_merge(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - - let merge_from_address = Pubkey::new_unique(); - // merge from account has same authority as stake account for simplicity, - // it also has lamports 0 to avoid `ArithmeticOverflow` to current stake account - let merge_from_account = AccountSharedData::new_data_with_space( - 1, - &StakeStateV2::Initialized(Meta::auto(&test_setup.stake_address)), - StakeStateV2::size_of(), - &solana_stake_program::id(), - ) - .unwrap(); - - test_setup.add_account(merge_from_address, merge_from_account); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ); - test_setup.add_account( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ); - - let instruction_data = serialize(&StakeInstruction::Merge).unwrap(); - - c.bench_function("merge", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn 
bench_get_minimum_delegation(c: &mut Criterion) { - let test_setup = TestSetup::new(); - let instruction_data = serialize(&StakeInstruction::GetMinimumDelegation).unwrap(); - - c.bench_function("get_minimum_delegation", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_deactivate_delinquent(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - - // reference vote account has been consistently voting - let mut vote_state = VoteState::default(); - for epoch in 0..=solana_stake_interface::MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION { - vote_state.increment_credits(epoch as Epoch, 1); - } - let reference_vote_address = Pubkey::new_unique(); - let reference_vote_account = AccountSharedData::new_data_with_space( - 1, - &VoteStateVersions::new_current(vote_state), - VoteState::size_of(), - &solana_sdk_ids::vote::id(), - ) - .unwrap(); - - let vote_address = Pubkey::new_unique(); - let vote_account = vote_state::create_account(&vote_address, &Pubkey::new_unique(), 0, 100); - test_setup.stake_account = AccountSharedData::new_data_with_space( - 1, - &StakeStateV2::Stake( - Meta::default(), - Stake { - delegation: Delegation::new(&vote_address, 1, 1), - credits_observed: VoteState::default().credits(), - }, - StakeFlags::empty(), - ), - StakeStateV2::size_of(), - &solana_stake_program::id(), - ) - .unwrap(); - test_setup.transaction_accounts[0] = - (test_setup.stake_address, test_setup.stake_account.clone()); - - test_setup.add_account(vote_address, vote_account); - test_setup.add_account(reference_vote_address, reference_vote_account); - test_setup.add_account( - clock::id(), - create_account_shared_data_for_test(&Clock { - epoch: solana_stake_interface::MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION as u64, - ..Clock::default() - }), - ); - - let instruction_data = serialize(&StakeInstruction::DeactivateDelinquent).unwrap(); - - c.bench_function("deactivate_delinquent", |bencher| { - bencher.iter(|| 
test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_move_stake(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - test_setup.delegate_stake(); - - let destination_stake_address = Pubkey::new_unique(); - let destination_stake_account = test_setup.transaction_accounts[0].1.clone(); - test_setup.add_account(destination_stake_address, destination_stake_account); - test_setup.add_account_signer(test_setup.stake_address, AccountSharedData::default()); - test_setup.add_account( - clock::id(), - // advance epoch to fully activate source account - create_account_shared_data_for_test(&Clock { - epoch: 1_u64, - ..Clock::default() - }), - ); - test_setup.add_account( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ); - - let instruction_data = serialize(&StakeInstruction::MoveStake(1)).unwrap(); - - c.bench_function("move_stake", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - }); -} - -fn bench_move_lamports(c: &mut Criterion) { - let mut test_setup = TestSetup::new(); - test_setup.initialize_stake_account(); - test_setup.delegate_stake(); - - let destination_stake_address = Pubkey::new_unique(); - let destination_stake_account = test_setup.transaction_accounts[0].1.clone(); - test_setup.add_account(destination_stake_address, destination_stake_account); - test_setup.add_account_signer(test_setup.stake_address, AccountSharedData::default()); - test_setup.add_account( - clock::id(), - // advance epoch to fully activate source account - create_account_shared_data_for_test(&Clock { - epoch: 1_u64, - ..Clock::default() - }), - ); - test_setup.add_account( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ); - - let instruction_data = serialize(&StakeInstruction::MoveLamports(1)).unwrap(); - - c.bench_function("move_lamports", |bencher| { - bencher.iter(|| test_setup.run(black_box(&instruction_data))) - 
}); -} - -criterion_group!( - benches, - bench_initialize, - bench_initialize_checked, - bench_authorize_staker, - bench_authorize_withdrawer, - bench_authorize_staker_with_seed, - bench_authorize_withdrawer_with_seed, - bench_authorize_staker_checked, - bench_authorize_withdrawer_checked, - bench_authorize_staker_checked_with_seed, - bench_authorize_withdrawer_checked_with_seed, - bench_set_lockup, - bench_set_lockup_checked, - bench_withdraw, - bench_delegate_stake, - bench_deactivate, - bench_split, - bench_merge, - bench_get_minimum_delegation, - bench_deactivate_delinquent, - bench_move_stake, - bench_move_lamports, -); -criterion_main!(benches); diff --git a/programs/stake/src/config.rs b/programs/stake/src/config.rs index b6b4e6d43d53cd..72dd6397b7605e 100644 --- a/programs/stake/src/config.rs +++ b/programs/stake/src/config.rs @@ -8,10 +8,10 @@ pub use solana_stake_interface::config::*; use { bincode::{deserialize, serialize}, solana_account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, - solana_config_program_client::{get_config_data, ConfigKeys}, + solana_config_interface::state::{get_config_data, ConfigKeys}, solana_genesis_config::GenesisConfig, solana_pubkey::Pubkey, - solana_transaction_context::BorrowedAccount, + solana_transaction_context::BorrowedInstructionAccount, }; #[allow(deprecated)] @@ -31,7 +31,7 @@ fn create_config_account( } #[allow(deprecated)] -pub fn from(account: &BorrowedAccount) -> Option { +pub fn from(account: &BorrowedInstructionAccount) -> Option { get_config_data(account.get_data()) .ok() .and_then(|data| deserialize(data).ok()) diff --git a/programs/stake/src/epoch_rewards.rs b/programs/stake/src/epoch_rewards.rs index 958a59da350455..468cfc3f24ddb5 100644 --- a/programs/stake/src/epoch_rewards.rs +++ b/programs/stake/src/epoch_rewards.rs @@ -5,7 +5,7 @@ use { solana_sdk_ids::sysvar, solana_sysvar::{ epoch_rewards::{self, EpochRewards}, - Sysvar, + SysvarSerialize, }, }; diff --git 
a/programs/stake/src/lib.rs b/programs/stake/src/lib.rs index fd68f1771609d8..d5b792f70f970e 100644 --- a/programs/stake/src/lib.rs +++ b/programs/stake/src/lib.rs @@ -11,7 +11,6 @@ pub mod config; pub mod epoch_rewards; #[deprecated(since = "2.2.0")] pub mod points; -pub mod stake_instruction; pub mod stake_state; pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig) -> u64 { diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs deleted file mode 100644 index 2584b826d0c4e2..00000000000000 --- a/programs/stake/src/stake_instruction.rs +++ /dev/null @@ -1,7535 +0,0 @@ -use { - crate::stake_state::{ - authorize, authorize_with_seed, deactivate, deactivate_delinquent, delegate, initialize, - merge, move_lamports, move_stake, new_warmup_cooldown_rate_epoch, set_lockup, split, - withdraw, - }, - log::*, - solana_bincode::limited_deserialize, - solana_instruction::error::InstructionError, - solana_program_runtime::{ - declare_process_instruction, sysvar_cache::get_sysvar_with_account_check, - }, - solana_pubkey::Pubkey, - solana_stake_interface::{ - error::StakeError, - instruction::{LockupArgs, StakeInstruction}, - program::id, - state::{Authorized, Lockup}, - }, - solana_transaction_context::{IndexOfAccount, InstructionContext, TransactionContext}, -}; - -fn get_optional_pubkey<'a>( - transaction_context: &'a TransactionContext, - instruction_context: &'a InstructionContext, - instruction_account_index: IndexOfAccount, - should_be_signer: bool, -) -> Result, InstructionError> { - Ok( - if instruction_account_index < instruction_context.get_number_of_instruction_accounts() { - if should_be_signer - && !instruction_context.is_instruction_account_signer(instruction_account_index)? 
- { - return Err(InstructionError::MissingRequiredSignature); - } - Some( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction( - instruction_account_index, - )?, - )?, - ) - } else { - None - }, - ) -} - -pub const DEFAULT_COMPUTE_UNITS: u64 = 750; - -declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| { - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let data = instruction_context.get_instruction_data(); - - trace!("process_instruction: {data:?}"); - - let get_stake_account = || { - let me = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - if *me.get_owner() != id() { - return Err(InstructionError::InvalidAccountOwner); - } - Ok(me) - }; - - // The EpochRewards sysvar only exists after the - // partitioned_epoch_rewards_superfeature feature is activated. If it - // exists, check the `active` field - let epoch_rewards_active = invoke_context - .get_sysvar_cache() - .get_epoch_rewards() - .map(|epoch_rewards| epoch_rewards.active) - .unwrap_or(false); - - let signers = instruction_context.get_signers(transaction_context)?; - - let stake_instruction: StakeInstruction = - limited_deserialize(data, solana_packet::PACKET_DATA_SIZE as u64)?; - if epoch_rewards_active && !matches!(stake_instruction, StakeInstruction::GetMinimumDelegation) - { - return Err(StakeError::EpochRewardsActive.into()); - } - match stake_instruction { - StakeInstruction::Initialize(authorized, lockup) => { - let mut me = get_stake_account()?; - let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - initialize(&mut me, &authorized, &lockup, &rent) - } - StakeInstruction::Authorize(authorized_pubkey, stake_authorize) => { - let mut me = get_stake_account()?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, 
instruction_context, 1)?; - instruction_context.check_number_of_instruction_accounts(3)?; - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 3, false)?; - - authorize( - &mut me, - &signers, - &authorized_pubkey, - stake_authorize, - &clock, - custodian_pubkey, - ) - } - StakeInstruction::AuthorizeWithSeed(args) => { - let mut me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(2)?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 3, false)?; - - authorize_with_seed( - transaction_context, - instruction_context, - &mut me, - 1, - &args.authority_seed, - &args.authority_owner, - &args.new_authorized_pubkey, - args.stake_authorize, - &clock, - custodian_pubkey, - ) - } - StakeInstruction::DelegateStake => { - let me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(2)?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - let stake_history = get_sysvar_with_account_check::stake_history( - invoke_context, - instruction_context, - 3, - )?; - instruction_context.check_number_of_instruction_accounts(5)?; - drop(me); - delegate( - transaction_context, - instruction_context, - 0, - 1, - &clock, - &stake_history, - &signers, - invoke_context, - ) - } - StakeInstruction::Split(lamports) => { - let me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(2)?; - drop(me); - split( - invoke_context, - transaction_context, - instruction_context, - 0, - lamports, - 1, - &signers, - ) - } - StakeInstruction::Merge => { - let me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(2)?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - let stake_history = get_sysvar_with_account_check::stake_history( 
- invoke_context, - instruction_context, - 3, - )?; - drop(me); - merge( - invoke_context, - transaction_context, - instruction_context, - 0, - 1, - &clock, - &stake_history, - &signers, - ) - } - StakeInstruction::Withdraw(lamports) => { - let me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(2)?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - let stake_history = get_sysvar_with_account_check::stake_history( - invoke_context, - instruction_context, - 3, - )?; - instruction_context.check_number_of_instruction_accounts(5)?; - drop(me); - withdraw( - transaction_context, - instruction_context, - 0, - lamports, - 1, - &clock, - &stake_history, - 4, - if instruction_context.get_number_of_instruction_accounts() >= 6 { - Some(5) - } else { - None - }, - new_warmup_cooldown_rate_epoch(), - ) - } - StakeInstruction::Deactivate => { - let mut me = get_stake_account()?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - deactivate(&mut me, &clock, &signers) - } - StakeInstruction::SetLockup(lockup) => { - let mut me = get_stake_account()?; - let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup(&mut me, &lockup, &signers, &clock) - } - StakeInstruction::InitializeChecked => { - let mut me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(4)?; - let staker_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(2)?, - )?; - let withdrawer_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? 
{ - return Err(InstructionError::MissingRequiredSignature); - } - - let authorized = Authorized { - staker: *staker_pubkey, - withdrawer: *withdrawer_pubkey, - }; - - let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - initialize(&mut me, &authorized, &Lockup::default(), &rent) - } - StakeInstruction::AuthorizeChecked(stake_authorize) => { - let mut me = get_stake_account()?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; - instruction_context.check_number_of_instruction_accounts(4)?; - let authorized_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? { - return Err(InstructionError::MissingRequiredSignature); - } - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 4, false)?; - - authorize( - &mut me, - &signers, - authorized_pubkey, - stake_authorize, - &clock, - custodian_pubkey, - ) - } - StakeInstruction::AuthorizeCheckedWithSeed(args) => { - let mut me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(2)?; - let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - instruction_context.check_number_of_instruction_accounts(4)?; - let authorized_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; - if !instruction_context.is_instruction_account_signer(3)? 
{ - return Err(InstructionError::MissingRequiredSignature); - } - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 4, false)?; - - authorize_with_seed( - transaction_context, - instruction_context, - &mut me, - 1, - &args.authority_seed, - &args.authority_owner, - authorized_pubkey, - args.stake_authorize, - &clock, - custodian_pubkey, - ) - } - StakeInstruction::SetLockupChecked(lockup_checked) => { - let mut me = get_stake_account()?; - let custodian_pubkey = - get_optional_pubkey(transaction_context, instruction_context, 2, true)?; - - let lockup = LockupArgs { - unix_timestamp: lockup_checked.unix_timestamp, - epoch: lockup_checked.epoch, - custodian: custodian_pubkey.cloned(), - }; - let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup(&mut me, &lockup, &signers, &clock) - } - StakeInstruction::GetMinimumDelegation => { - let minimum_delegation = crate::get_minimum_delegation( - invoke_context.is_stake_raise_minimum_delegation_to_1_sol_active(), - ); - let minimum_delegation = Vec::from(minimum_delegation.to_le_bytes()); - invoke_context - .transaction_context - .set_return_data(id(), minimum_delegation) - } - StakeInstruction::DeactivateDelinquent => { - let mut me = get_stake_account()?; - instruction_context.check_number_of_instruction_accounts(3)?; - - let clock = invoke_context.get_sysvar_cache().get_clock()?; - deactivate_delinquent( - transaction_context, - instruction_context, - &mut me, - 1, - 2, - clock.epoch, - ) - } - #[allow(deprecated)] - StakeInstruction::Redelegate => { - let _ = get_stake_account()?; - Err(InstructionError::InvalidInstructionData) - } - StakeInstruction::MoveStake(lamports) => { - instruction_context.check_number_of_instruction_accounts(3)?; - move_stake( - invoke_context, - transaction_context, - instruction_context, - 0, - lamports, - 1, - 2, - ) - } - StakeInstruction::MoveLamports(lamports) => { - instruction_context.check_number_of_instruction_accounts(3)?; - 
move_lamports( - invoke_context, - transaction_context, - instruction_context, - 0, - lamports, - 1, - 2, - ) - } - } -}); - -#[cfg(test)] -mod tests { - use { - super::*, - crate::{ - config, - stake_state::{ - authorized_from, create_stake_history_from_delegations, from, new_stake, - stake_from, Delegation, Meta, Stake, StakeStateV2, - }, - }, - agave_feature_set::FeatureSet, - assert_matches::assert_matches, - bincode::serialize, - solana_account::{ - create_account_shared_data_for_test, state_traits::StateMut, AccountSharedData, - ReadableAccount, WritableAccount, - }, - solana_clock::{Clock, Epoch, UnixTimestamp}, - solana_epoch_rewards::EpochRewards, - solana_epoch_schedule::EpochSchedule, - solana_instruction::{AccountMeta, Instruction}, - solana_program_runtime::invoke_context::mock_process_instruction_with_feature_set, - solana_pubkey::Pubkey, - solana_rent::Rent, - solana_sdk_ids::{ - system_program, - sysvar::{clock, epoch_rewards, epoch_schedule, rent, rewards, stake_history}, - }, - solana_stake_interface::{ - config as stake_config, - error::StakeError, - instruction::{ - self, authorize_checked, authorize_checked_with_seed, initialize_checked, - set_lockup_checked, AuthorizeCheckedWithSeedArgs, AuthorizeWithSeedArgs, - LockupArgs, - }, - stake_flags::StakeFlags, - state::{warmup_cooldown_rate, Authorized, Lockup, StakeAuthorize}, - MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION, - }, - solana_sysvar::{ - rewards::Rewards, - stake_history::{StakeHistory, StakeHistoryEntry}, - }, - solana_vote_interface::state::{VoteState, VoteStateVersions}, - solana_vote_program::vote_state, - std::{collections::HashSet, str::FromStr, sync::Arc}, - test_case::test_case, - }; - - fn feature_set_all_enabled() -> Arc { - Arc::new(FeatureSet::all_enabled()) - } - - /// No stake minimum delegation - fn feature_set_no_minimum_delegation() -> Arc { - let mut feature_set = feature_set_all_enabled(); - Arc::get_mut(&mut feature_set) - .unwrap() - 
.deactivate(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()); - feature_set - } - - fn create_default_account() -> AccountSharedData { - AccountSharedData::new(0, 0, &Pubkey::new_unique()) - } - - fn create_default_stake_account() -> AccountSharedData { - AccountSharedData::new(0, 0, &id()) - } - - fn invalid_stake_state_pubkey() -> Pubkey { - Pubkey::from_str("BadStake11111111111111111111111111111111111").unwrap() - } - - fn invalid_vote_state_pubkey() -> Pubkey { - Pubkey::from_str("BadVote111111111111111111111111111111111111").unwrap() - } - - fn spoofed_stake_state_pubkey() -> Pubkey { - Pubkey::from_str("SpoofedStake1111111111111111111111111111111").unwrap() - } - - fn spoofed_stake_program_id() -> Pubkey { - Pubkey::from_str("Spoofed111111111111111111111111111111111111").unwrap() - } - - fn process_instruction( - feature_set: Arc, - instruction_data: &[u8], - transaction_accounts: Vec<(Pubkey, AccountSharedData)>, - instruction_accounts: Vec, - expected_result: Result<(), InstructionError>, - ) -> Vec { - mock_process_instruction_with_feature_set( - &id(), - Vec::new(), - instruction_data, - transaction_accounts, - instruction_accounts, - expected_result, - Entrypoint::vm, - |_invoke_context| {}, - |_invoke_context| {}, - &feature_set.runtime_features(), - ) - } - - fn get_default_transaction_accounts( - instruction: &Instruction, - ) -> Vec<(Pubkey, AccountSharedData)> { - let mut pubkeys: HashSet = instruction - .accounts - .iter() - .map(|meta| meta.pubkey) - .collect(); - pubkeys.insert(clock::id()); - pubkeys.insert(epoch_schedule::id()); - pubkeys.insert(stake_history::id()); - #[allow(deprecated)] - pubkeys - .iter() - .map(|pubkey| { - ( - *pubkey, - if clock::check_id(pubkey) { - create_account_shared_data_for_test(&Clock::default()) - } else if rewards::check_id(pubkey) { - create_account_shared_data_for_test(&Rewards::new(0.0)) - } else if stake_history::check_id(pubkey) { - 
create_account_shared_data_for_test(&StakeHistory::default()) - } else if stake_config::check_id(pubkey) { - config::create_account(0, &stake_config::Config::default()) - } else if epoch_schedule::check_id(pubkey) { - create_account_shared_data_for_test(&EpochSchedule::default()) - } else if rent::check_id(pubkey) { - create_account_shared_data_for_test(&Rent::default()) - } else if *pubkey == invalid_stake_state_pubkey() { - AccountSharedData::new(0, 0, &id()) - } else if *pubkey == invalid_vote_state_pubkey() { - AccountSharedData::new(0, 0, &solana_sdk_ids::vote::id()) - } else if *pubkey == spoofed_stake_state_pubkey() { - AccountSharedData::new(0, 0, &spoofed_stake_program_id()) - } else { - AccountSharedData::new(0, 0, &id()) - }, - ) - }) - .collect() - } - - fn process_instruction_as_one_arg( - feature_set: Arc, - instruction: &Instruction, - expected_result: Result<(), InstructionError>, - ) -> Vec { - let transaction_accounts = get_default_transaction_accounts(instruction); - process_instruction( - Arc::clone(&feature_set), - &instruction.data, - transaction_accounts, - instruction.accounts.clone(), - expected_result, - ) - } - - fn just_stake(meta: Meta, stake: u64) -> StakeStateV2 { - StakeStateV2::Stake( - meta, - Stake { - delegation: Delegation { - stake, - ..Delegation::default() - }, - ..Stake::default() - }, - StakeFlags::empty(), - ) - } - - fn get_active_stake_for_tests( - stake_accounts: &[AccountSharedData], - clock: &Clock, - stake_history: &StakeHistory, - ) -> u64 { - let mut active_stake = 0; - for account in stake_accounts { - if let StakeStateV2::Stake(_meta, stake, _stake_flags) = account.state().unwrap() { - let stake_status = stake.delegation.stake_activating_and_deactivating( - clock.epoch, - stake_history, - None, - ); - active_stake += stake_status.effective; - } - } - active_stake - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn 
test_stake_process_instruction(feature_set: Arc) { - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::initialize( - &Pubkey::new_unique(), - &Authorized::default(), - &Lockup::default(), - ), - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::authorize( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - StakeAuthorize::Staker, - None, - ), - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::split( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - &invalid_stake_state_pubkey(), - )[2], - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::merge( - &Pubkey::new_unique(), - &invalid_stake_state_pubkey(), - &Pubkey::new_unique(), - )[0], - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::split_with_seed( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - &invalid_stake_state_pubkey(), - &Pubkey::new_unique(), - "seed", - )[1], - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::delegate_stake( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &invalid_vote_state_pubkey(), - ), - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::withdraw( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - None, - ), - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::deactivate_stake(&Pubkey::new_unique(), &Pubkey::new_unique()), - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - 
&instruction::set_lockup( - &Pubkey::new_unique(), - &LockupArgs::default(), - &Pubkey::new_unique(), - ), - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::deactivate_delinquent_stake( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &invalid_vote_state_pubkey(), - ), - Err(InstructionError::IncorrectProgramId), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::deactivate_delinquent_stake( - &Pubkey::new_unique(), - &invalid_vote_state_pubkey(), - &Pubkey::new_unique(), - ), - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::deactivate_delinquent_stake( - &Pubkey::new_unique(), - &invalid_vote_state_pubkey(), - &invalid_vote_state_pubkey(), - ), - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::move_stake( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - ), - Err(InstructionError::InvalidAccountData), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::move_lamports( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - ), - Err(InstructionError::InvalidAccountData), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_spoofed_stake_accounts(feature_set: Arc) { - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::initialize( - &spoofed_stake_state_pubkey(), - &Authorized::default(), - &Lockup::default(), - ), - Err(InstructionError::InvalidAccountOwner), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::authorize( - &spoofed_stake_state_pubkey(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - StakeAuthorize::Staker, - None, - ), - 
Err(InstructionError::InvalidAccountOwner), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::split( - &spoofed_stake_state_pubkey(), - &Pubkey::new_unique(), - 100, - &Pubkey::new_unique(), - )[2], - Err(InstructionError::InvalidAccountOwner), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::split( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - &spoofed_stake_state_pubkey(), - )[2], - Err(InstructionError::IncorrectProgramId), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::merge( - &spoofed_stake_state_pubkey(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - )[0], - Err(InstructionError::InvalidAccountOwner), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::merge( - &Pubkey::new_unique(), - &spoofed_stake_state_pubkey(), - &Pubkey::new_unique(), - )[0], - Err(InstructionError::IncorrectProgramId), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::split_with_seed( - &spoofed_stake_state_pubkey(), - &Pubkey::new_unique(), - 100, - &Pubkey::new_unique(), - &Pubkey::new_unique(), - "seed", - )[1], - Err(InstructionError::InvalidAccountOwner), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::delegate_stake( - &spoofed_stake_state_pubkey(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - ), - Err(InstructionError::InvalidAccountOwner), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::withdraw( - &spoofed_stake_state_pubkey(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - None, - ), - Err(InstructionError::InvalidAccountOwner), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::deactivate_stake(&spoofed_stake_state_pubkey(), &Pubkey::new_unique()), - Err(InstructionError::InvalidAccountOwner), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - 
&instruction::set_lockup( - &spoofed_stake_state_pubkey(), - &LockupArgs::default(), - &Pubkey::new_unique(), - ), - Err(InstructionError::InvalidAccountOwner), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::deactivate_delinquent_stake( - &spoofed_stake_state_pubkey(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - ), - Err(InstructionError::InvalidAccountOwner), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_stake_process_instruction_decode_bail(feature_set: Arc) { - // these will not call stake_state, have bogus contents - let stake_address = Pubkey::new_unique(); - let stake_account = create_default_stake_account(); - let rent_address = rent::id(); - let rent = Rent::default(); - let rent_account = create_account_shared_data_for_test(&rent); - let rewards_address = rewards::id(); - let rewards_account = create_account_shared_data_for_test(&Rewards::new(0.0)); - let stake_history_address = stake_history::id(); - let stake_history_account = create_account_shared_data_for_test(&StakeHistory::default()); - let vote_address = Pubkey::new_unique(); - let vote_account = AccountSharedData::new(0, 0, &solana_sdk_ids::vote::id()); - let clock_address = clock::id(); - let clock_account = create_account_shared_data_for_test(&Clock::default()); - #[allow(deprecated)] - let config_address = stake_config::id(); - #[allow(deprecated)] - let config_account = config::create_account(0, &stake_config::Config::default()); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let withdrawal_amount = rent_exempt_reserve + minimum_delegation; - - // gets the "is_empty()" check - process_instruction( - Arc::clone(&feature_set), - 
&serialize(&StakeInstruction::Initialize( - Authorized::default(), - Lockup::default(), - )) - .unwrap(), - Vec::new(), - Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), - ); - - // no account for rent - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Initialize( - Authorized::default(), - Lockup::default(), - )) - .unwrap(), - vec![(stake_address, stake_account.clone())], - vec![AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }], - Err(InstructionError::NotEnoughAccountKeys), - ); - - // fails to deserialize stake state - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Initialize( - Authorized::default(), - Lockup::default(), - )) - .unwrap(), - vec![ - (stake_address, stake_account.clone()), - (rent_address, rent_account), - ], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: rent_address, - is_signer: false, - is_writable: false, - }, - ], - Err(InstructionError::InvalidAccountData), - ); - - // gets the first check in delegate, wrong number of accounts - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - vec![(stake_address, stake_account.clone())], - vec![AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }], - Err(InstructionError::NotEnoughAccountKeys), - ); - - // gets the sub-check for number of args - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - vec![(stake_address, stake_account.clone())], - vec![AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }], - Err(InstructionError::NotEnoughAccountKeys), - ); - - // gets the check non-deserialize-able account in delegate_stake - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - vec![ - 
(stake_address, stake_account.clone()), - (vote_address, vote_account.clone()), - (clock_address, clock_account), - (stake_history_address, stake_history_account.clone()), - (config_address, config_account), - ], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: config_address, - is_signer: false, - is_writable: false, - }, - ], - Err(InstructionError::InvalidAccountData), - ); - - // Tests 3rd keyed account is of correct type (Clock instead of rewards) in withdraw - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(withdrawal_amount)).unwrap(), - vec![ - (stake_address, stake_account.clone()), - (vote_address, vote_account.clone()), - (rewards_address, rewards_account.clone()), - (stake_history_address, stake_history_account), - ], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: rewards_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history_address, - is_signer: false, - is_writable: false, - }, - ], - Err(InstructionError::InvalidArgument), - ); - - // Tests correct number of accounts are provided in withdraw - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(withdrawal_amount)).unwrap(), - vec![(stake_address, stake_account.clone())], - vec![AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }], - Err(InstructionError::NotEnoughAccountKeys), - ); - - // Tests 2nd keyed account is of correct type (Clock instead of 
rewards) in deactivate - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - vec![ - (stake_address, stake_account.clone()), - (rewards_address, rewards_account), - ], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: rewards_address, - is_signer: false, - is_writable: false, - }, - ], - Err(InstructionError::InvalidArgument), - ); - - // Tests correct number of accounts are provided in deactivate - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - Vec::new(), - Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), - ); - - // Tests correct number of accounts are provided in deactivate_delinquent - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DeactivateDelinquent).unwrap(), - Vec::new(), - Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), - ); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DeactivateDelinquent).unwrap(), - vec![(stake_address, stake_account.clone())], - vec![AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }], - Err(InstructionError::NotEnoughAccountKeys), - ); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DeactivateDelinquent).unwrap(), - vec![(stake_address, stake_account), (vote_address, vote_account)], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - ], - Err(InstructionError::NotEnoughAccountKeys), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_stake_checked_instructions(feature_set: Arc) { - let stake_address = Pubkey::new_unique(); - let staker = Pubkey::new_unique(); - let 
staker_account = create_default_account(); - let withdrawer = Pubkey::new_unique(); - let withdrawer_account = create_default_account(); - let authorized_address = Pubkey::new_unique(); - let authorized_account = create_default_account(); - let new_authorized_account = create_default_account(); - let clock_address = clock::id(); - let clock_account = create_account_shared_data_for_test(&Clock::default()); - let custodian = Pubkey::new_unique(); - let custodian_account = create_default_account(); - let rent = Rent::default(); - let rent_address = rent::id(); - let rent_account = create_account_shared_data_for_test(&rent); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - - // Test InitializeChecked with non-signing withdrawer - let mut instruction = - initialize_checked(&stake_address, &Authorized { staker, withdrawer }); - instruction.accounts[3] = AccountMeta::new_readonly(withdrawer, false); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction, - Err(InstructionError::MissingRequiredSignature), - ); - - // Test InitializeChecked with withdrawer signer - let stake_account = AccountSharedData::new( - rent_exempt_reserve + minimum_delegation, - StakeStateV2::size_of(), - &id(), - ); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::InitializeChecked).unwrap(), - vec![ - (stake_address, stake_account), - (rent_address, rent_account), - (staker, staker_account), - (withdrawer, withdrawer_account.clone()), - ], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: rent_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: staker, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: withdrawer, - is_signer: true, - 
is_writable: false, - }, - ], - Ok(()), - ); - - // Test AuthorizeChecked with non-signing authority - let mut instruction = authorize_checked( - &stake_address, - &authorized_address, - &staker, - StakeAuthorize::Staker, - None, - ); - instruction.accounts[3] = AccountMeta::new_readonly(staker, false); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction, - Err(InstructionError::MissingRequiredSignature), - ); - - let mut instruction = authorize_checked( - &stake_address, - &authorized_address, - &withdrawer, - StakeAuthorize::Withdrawer, - None, - ); - instruction.accounts[3] = AccountMeta::new_readonly(withdrawer, false); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction, - Err(InstructionError::MissingRequiredSignature), - ); - - // Test AuthorizeChecked with authority signer - let stake_account = AccountSharedData::new_data_with_space( - 42, - &StakeStateV2::Initialized(Meta::auto(&authorized_address)), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::AuthorizeChecked(StakeAuthorize::Staker)).unwrap(), - vec![ - (stake_address, stake_account.clone()), - (clock_address, clock_account.clone()), - (authorized_address, authorized_account.clone()), - (staker, new_authorized_account.clone()), - ], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_address, - is_signer: true, - is_writable: false, - }, - AccountMeta { - pubkey: staker, - is_signer: true, - is_writable: false, - }, - ], - Ok(()), - ); - - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::AuthorizeChecked( - StakeAuthorize::Withdrawer, - )) - .unwrap(), - vec![ - (stake_address, stake_account), - (clock_address, clock_account.clone()), - (authorized_address, 
authorized_account.clone()), - (withdrawer, new_authorized_account.clone()), - ], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_address, - is_signer: true, - is_writable: false, - }, - AccountMeta { - pubkey: withdrawer, - is_signer: true, - is_writable: false, - }, - ], - Ok(()), - ); - - // Test AuthorizeCheckedWithSeed with non-signing authority - let authorized_owner = Pubkey::new_unique(); - let seed = "test seed"; - let address_with_seed = - Pubkey::create_with_seed(&authorized_owner, seed, &authorized_owner).unwrap(); - let mut instruction = authorize_checked_with_seed( - &stake_address, - &authorized_owner, - seed.to_string(), - &authorized_owner, - &staker, - StakeAuthorize::Staker, - None, - ); - instruction.accounts[3] = AccountMeta::new_readonly(staker, false); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction, - Err(InstructionError::MissingRequiredSignature), - ); - - let mut instruction = authorize_checked_with_seed( - &stake_address, - &authorized_owner, - seed.to_string(), - &authorized_owner, - &staker, - StakeAuthorize::Withdrawer, - None, - ); - instruction.accounts[3] = AccountMeta::new_readonly(staker, false); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction, - Err(InstructionError::MissingRequiredSignature), - ); - - // Test AuthorizeCheckedWithSeed with authority signer - let stake_account = AccountSharedData::new_data_with_space( - 42, - &StakeStateV2::Initialized(Meta::auto(&address_with_seed)), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::AuthorizeCheckedWithSeed( - AuthorizeCheckedWithSeedArgs { - stake_authorize: StakeAuthorize::Staker, - authority_seed: seed.to_string(), - authority_owner: authorized_owner, - }, - )) - 
.unwrap(), - vec![ - (address_with_seed, stake_account.clone()), - (authorized_owner, authorized_account.clone()), - (clock_address, clock_account.clone()), - (staker, new_authorized_account.clone()), - ], - vec![ - AccountMeta { - pubkey: address_with_seed, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: authorized_owner, - is_signer: true, - is_writable: false, - }, - AccountMeta { - pubkey: clock_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: staker, - is_signer: true, - is_writable: false, - }, - ], - Ok(()), - ); - - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::AuthorizeCheckedWithSeed( - AuthorizeCheckedWithSeedArgs { - stake_authorize: StakeAuthorize::Withdrawer, - authority_seed: seed.to_string(), - authority_owner: authorized_owner, - }, - )) - .unwrap(), - vec![ - (address_with_seed, stake_account), - (authorized_owner, authorized_account), - (clock_address, clock_account.clone()), - (withdrawer, new_authorized_account), - ], - vec![ - AccountMeta { - pubkey: address_with_seed, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: authorized_owner, - is_signer: true, - is_writable: false, - }, - AccountMeta { - pubkey: clock_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: withdrawer, - is_signer: true, - is_writable: false, - }, - ], - Ok(()), - ); - - // Test SetLockupChecked with non-signing lockup custodian - let mut instruction = set_lockup_checked( - &stake_address, - &LockupArgs { - unix_timestamp: None, - epoch: Some(1), - custodian: Some(custodian), - }, - &withdrawer, - ); - instruction.accounts[2] = AccountMeta::new_readonly(custodian, false); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction, - Err(InstructionError::MissingRequiredSignature), - ); - - // Test SetLockupChecked with lockup custodian signer - let stake_account = AccountSharedData::new_data_with_space( - 42, - 
&StakeStateV2::Initialized(Meta::auto(&withdrawer)), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - - process_instruction( - Arc::clone(&feature_set), - &instruction.data, - vec![ - (clock_address, clock_account), - (stake_address, stake_account), - (withdrawer, withdrawer_account), - (custodian, custodian_account), - ], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: withdrawer, - is_signer: true, - is_writable: false, - }, - AccountMeta { - pubkey: custodian, - is_signer: true, - is_writable: false, - }, - ], - Ok(()), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_stake_initialize(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_lamports = rent_exempt_reserve; - let stake_address = solana_pubkey::new_rand(); - let stake_account = AccountSharedData::new(stake_lamports, StakeStateV2::size_of(), &id()); - let custodian_address = solana_pubkey::new_rand(); - let lockup = Lockup { - epoch: 1, - unix_timestamp: 0, - custodian: custodian_address, - }; - let instruction_data = serialize(&StakeInstruction::Initialize( - Authorized::auto(&stake_address), - lockup, - )) - .unwrap(); - let mut transaction_accounts = vec![ - (stake_address, stake_account.clone()), - (rent::id(), create_account_shared_data_for_test(&rent)), - ]; - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: rent::id(), - is_signer: false, - is_writable: false, - }, - ]; - - // should pass - let accounts = process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - // check that we see what we expect - assert_eq!( - from(&accounts[0]).unwrap(), - 
StakeStateV2::Initialized(Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve, - lockup, - }), - ); - - // 2nd time fails, can't move it from anything other than uninit->init - transaction_accounts[0] = (stake_address, accounts[0].clone()); - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - transaction_accounts[0] = (stake_address, stake_account); - - // not enough balance for rent - transaction_accounts[1] = ( - rent::id(), - create_account_shared_data_for_test(&Rent { - lamports_per_byte_year: rent.lamports_per_byte_year + 1, - ..rent - }), - ); - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // incorrect account sizes - let stake_account = - AccountSharedData::new(stake_lamports, StakeStateV2::size_of() + 1, &id()); - transaction_accounts[0] = (stake_address, stake_account); - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - - let stake_account = - AccountSharedData::new(stake_lamports, StakeStateV2::size_of() - 1, &id()); - transaction_accounts[0] = (stake_address, stake_account); - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts, - instruction_accounts, - Err(InstructionError::InvalidAccountData), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_authorize(feature_set: Arc) { - let authority_address = solana_pubkey::new_rand(); - let authority_address_2 = solana_pubkey::new_rand(); - let stake_address = solana_pubkey::new_rand(); - let stake_lamports = 42; - let stake_account = 
AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::default(), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let to_address = solana_pubkey::new_rand(); - let to_account = AccountSharedData::new(1, 0, &system_program::id()); - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (to_address, to_account), - (authority_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authority_address, - is_signer: false, - is_writable: false, - }, - ]; - - // should fail, uninit - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authority_address, - StakeAuthorize::Staker, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - - // should pass - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Initialized(Meta::auto(&stake_address)), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - transaction_accounts[0] = (stake_address, stake_account); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authority_address, - StakeAuthorize::Staker, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( 
- authority_address, - StakeAuthorize::Withdrawer, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - if let StakeStateV2::Initialized(Meta { authorized, .. }) = from(&accounts[0]).unwrap() { - assert_eq!(authorized.staker, authority_address); - assert_eq!(authorized.withdrawer, authority_address); - } else { - panic!(); - } - - // A second authorization signed by the stake account should fail - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authority_address_2, - StakeAuthorize::Staker, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - - // Test a second authorization by the new authority_address - instruction_accounts[0].is_signer = false; - instruction_accounts[2].is_signer = true; - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authority_address_2, - StakeAuthorize::Staker, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - if let StakeStateV2::Initialized(Meta { authorized, .. 
}) = from(&accounts[0]).unwrap() { - assert_eq!(authorized.staker, authority_address_2); - } else { - panic!(); - } - - // Test a successful action by the currently authorized withdrawer - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: to_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authority_address, - is_signer: true, - is_writable: false, - }, - ]; - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - assert_eq!(from(&accounts[0]).unwrap(), StakeStateV2::Uninitialized); - - // Test that withdrawal to account fails without authorized withdrawer - instruction_accounts[4].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports)).unwrap(), - transaction_accounts, - instruction_accounts, - Err(InstructionError::MissingRequiredSignature), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_authorize_override(feature_set: Arc) { - let authority_address = solana_pubkey::new_rand(); - let mallory_address = solana_pubkey::new_rand(); - let stake_address = solana_pubkey::new_rand(); - let stake_lamports = 42; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Initialized(Meta::auto(&stake_address)), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (authority_address, AccountSharedData::default()), - ( - 
clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ]; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authority_address, - is_signer: false, - is_writable: false, - }, - ]; - - // Authorize a staker pubkey and move the withdrawer key into cold storage. - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authority_address, - StakeAuthorize::Staker, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // Attack! The stake key (a hot key) is stolen and used to authorize a new staker. - instruction_accounts[0].is_signer = false; - instruction_accounts[2].is_signer = true; - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - mallory_address, - StakeAuthorize::Staker, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // Verify the original staker no longer has access. - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authority_address, - StakeAuthorize::Staker, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - - // Verify the withdrawer (pulled from cold storage) can save the day. 
- instruction_accounts[0].is_signer = true; - instruction_accounts[2].is_signer = false; - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authority_address, - StakeAuthorize::Withdrawer, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // Attack! Verify the staker cannot be used to authorize a withdraw. - instruction_accounts[0].is_signer = false; - instruction_accounts[2] = AccountMeta { - pubkey: mallory_address, - is_signer: true, - is_writable: false, - }; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authority_address, - StakeAuthorize::Withdrawer, - )) - .unwrap(), - transaction_accounts, - instruction_accounts, - Err(InstructionError::MissingRequiredSignature), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_authorize_with_seed(feature_set: Arc) { - let authority_base_address = solana_pubkey::new_rand(); - let authority_address = solana_pubkey::new_rand(); - let seed = "42"; - let stake_address = Pubkey::create_with_seed(&authority_base_address, seed, &id()).unwrap(); - let stake_lamports = 42; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Initialized(Meta::auto(&stake_address)), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (authority_base_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ]; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: authority_base_address, - is_signer: true, - is_writable: false, - }, - AccountMeta { - 
pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - ]; - - // Wrong seed - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::AuthorizeWithSeed( - AuthorizeWithSeedArgs { - new_authorized_pubkey: authority_address, - stake_authorize: StakeAuthorize::Staker, - authority_seed: "".to_string(), - authority_owner: id(), - }, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - - // Wrong base - instruction_accounts[1].pubkey = authority_address; - let instruction_data = serialize(&StakeInstruction::AuthorizeWithSeed( - AuthorizeWithSeedArgs { - new_authorized_pubkey: authority_address, - stake_authorize: StakeAuthorize::Staker, - authority_seed: seed.to_string(), - authority_owner: id(), - }, - )) - .unwrap(); - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - instruction_accounts[1].pubkey = authority_base_address; - - // Set stake authority - let accounts = process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // Set withdraw authority - let instruction_data = serialize(&StakeInstruction::AuthorizeWithSeed( - AuthorizeWithSeedArgs { - new_authorized_pubkey: authority_address, - stake_authorize: StakeAuthorize::Withdrawer, - authority_seed: seed.to_string(), - authority_owner: id(), - }, - )) - .unwrap(); - let accounts = process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // No longer withdraw authority - process_instruction( - Arc::clone(&feature_set), - 
&instruction_data, - transaction_accounts, - instruction_accounts, - Err(InstructionError::MissingRequiredSignature), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_authorize_delegated_stake(feature_set: Arc) { - let authority_address = solana_pubkey::new_rand(); - let stake_address = solana_pubkey::new_rand(); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = minimum_delegation; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Initialized(Meta::auto(&stake_address)), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let vote_address = solana_pubkey::new_rand(); - let vote_account = - vote_state::create_account(&vote_address, &solana_pubkey::new_rand(), 0, 100); - let vote_address_2 = solana_pubkey::new_rand(); - let mut vote_account_2 = - vote_state::create_account(&vote_address_2, &solana_pubkey::new_rand(), 0, 100); - vote_account_2.set_state(&VoteState::default()).unwrap(); - #[allow(deprecated)] - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (vote_address, vote_account), - (vote_address_2, vote_account_2), - ( - authority_address, - AccountSharedData::new(42, 0, &system_program::id()), - ), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - #[allow(deprecated)] - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - 
is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - ]; - - // delegate stake - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // deactivate, so we can re-delegate - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // authorize - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authority_address, - StakeAuthorize::Staker, - )) - .unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authority_address, - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - assert_eq!( - authorized_from(&accounts[0]).unwrap().staker, - authority_address - ); - - // Random other account should fail - instruction_accounts[0].is_signer = false; - instruction_accounts[1].pubkey = vote_address_2; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - 
transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - - // Authorized staker should succeed - instruction_accounts.push(AccountMeta { - pubkey: authority_address, - is_signer: true, - is_writable: false, - }); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts, - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - assert_eq!( - stake_from(&accounts[0]).unwrap().delegation.voter_pubkey, - vote_address_2, - ); - - // Test another staking action - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts, - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authority_address, - is_signer: true, - is_writable: false, - }, - ], - Ok(()), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_stake_delegate(feature_set: Arc) { - let mut vote_state = VoteState::default(); - for i in 0..1000 { - vote_state::process_slot_vote_unchecked(&mut vote_state, i); - } - let vote_state_credits = vote_state.credits(); - let vote_address = solana_pubkey::new_rand(); - let vote_address_2 = solana_pubkey::new_rand(); - let mut vote_account = - vote_state::create_account(&vote_address, &solana_pubkey::new_rand(), 0, 100); - let mut vote_account_2 = - vote_state::create_account(&vote_address_2, &solana_pubkey::new_rand(), 0, 100); - vote_account - .set_state(&VoteStateVersions::new_current(vote_state.clone())) - .unwrap(); - vote_account_2 - .set_state(&VoteStateVersions::new_current(vote_state)) - .unwrap(); - let minimum_delegation = 
crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = minimum_delegation; - let stake_address = solana_pubkey::new_rand(); - let mut stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Initialized(Meta { - authorized: Authorized { - staker: stake_address, - withdrawer: stake_address, - }, - ..Meta::default() - }), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let mut clock = Clock { - epoch: 1, - ..Clock::default() - }; - #[allow(deprecated)] - let mut transaction_accounts = vec![ - (stake_address, stake_account.clone()), - (vote_address, vote_account), - (vote_address_2, vote_account_2.clone()), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - #[allow(deprecated)] - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - ]; - - // should fail, unsigned stake account - instruction_accounts[0].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - instruction_accounts[0].is_signer = true; - - // should 
pass - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - // verify that delegate() looks right, compare against hand-rolled - assert_eq!( - stake_from(&accounts[0]).unwrap(), - Stake { - delegation: Delegation { - voter_pubkey: vote_address, - stake: stake_lamports, - activation_epoch: clock.epoch, - deactivation_epoch: u64::MAX, - ..Delegation::default() - }, - credits_observed: vote_state_credits, - } - ); - - // verify that delegate fails as stake is active and not deactivating - clock.epoch += 1; - transaction_accounts[0] = (stake_address, accounts[0].clone()); - transaction_accounts[3] = (clock::id(), create_account_shared_data_for_test(&clock)); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(StakeError::TooSoonToRedelegate.into()), - ); - - // deactivate - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - - // verify that delegate to a different vote account fails - // during deactivation - transaction_accounts[0] = (stake_address, accounts[0].clone()); - instruction_accounts[1].pubkey = vote_address_2; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(StakeError::TooSoonToRedelegate.into()), - ); - instruction_accounts[1].pubkey = vote_address; - - // verify that delegate succeeds to same vote account - // when stake is deactivating - let accounts_2 = 
process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - // verify that deactivation has been cleared - let stake = stake_from(&accounts_2[0]).unwrap(); - assert_eq!(stake.delegation.deactivation_epoch, u64::MAX); - - // verify that delegate to a different vote account fails - // if stake is still active - transaction_accounts[0] = (stake_address, accounts_2[0].clone()); - instruction_accounts[1].pubkey = vote_address_2; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(StakeError::TooSoonToRedelegate.into()), - ); - - // without stake history, cool down is instantaneous - clock.epoch += 1; - transaction_accounts[3] = (clock::id(), create_account_shared_data_for_test(&clock)); - - // verify that delegate can be called to new vote account, 2nd is redelegate - transaction_accounts[0] = (stake_address, accounts[0].clone()); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - instruction_accounts[1].pubkey = vote_address; - // verify that delegate() looks right, compare against hand-rolled - assert_eq!( - stake_from(&accounts[0]).unwrap(), - Stake { - delegation: Delegation { - voter_pubkey: vote_address_2, - stake: stake_lamports, - activation_epoch: clock.epoch, - deactivation_epoch: u64::MAX, - ..Delegation::default() - }, - credits_observed: vote_state_credits, - } - ); - - // signed but faked vote account - transaction_accounts[1] = (vote_address_2, vote_account_2); - transaction_accounts[1] - .1 - .set_owner(solana_pubkey::new_rand()); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - 
transaction_accounts.clone(), - instruction_accounts.clone(), - Err(solana_instruction::error::InstructionError::IncorrectProgramId), - ); - - // verify that non-stakes fail delegate() - let stake_state = StakeStateV2::RewardsPool; - stake_account.set_state(&stake_state).unwrap(); - transaction_accounts[0] = (stake_address, stake_account); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts, - instruction_accounts, - Err(solana_instruction::error::InstructionError::IncorrectProgramId), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_redelegate_consider_balance_changes(feature_set: Arc) { - let mut clock = Clock::default(); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let initial_lamports = 4242424242; - let stake_lamports = rent_exempt_reserve + initial_lamports; - let recipient_address = solana_pubkey::new_rand(); - let authority_address = solana_pubkey::new_rand(); - let vote_address = solana_pubkey::new_rand(); - let vote_account = - vote_state::create_account(&vote_address, &solana_pubkey::new_rand(), 0, 100); - let stake_address = solana_pubkey::new_rand(); - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Initialized(Meta { - rent_exempt_reserve, - ..Meta::auto(&authority_address) - }), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - #[allow(deprecated)] - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (vote_address, vote_account), - ( - recipient_address, - AccountSharedData::new(1, 0, &system_program::id()), - ), - (authority_address, AccountSharedData::default()), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - 
stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - #[allow(deprecated)] - let delegate_instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authority_address, - is_signer: true, - is_writable: false, - }, - ]; - let deactivate_instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authority_address, - is_signer: true, - is_writable: false, - }, - ]; - - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - delegate_instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - clock.epoch += 1; - transaction_accounts[2] = (clock::id(), create_account_shared_data_for_test(&clock)); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts.clone(), - deactivate_instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // Once deactivated, we withdraw stake to new account - clock.epoch += 1; - transaction_accounts[2] = (clock::id(), create_account_shared_data_for_test(&clock)); - let withdraw_lamports = initial_lamports / 2; - 
let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(withdraw_lamports)).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: recipient_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authority_address, - is_signer: true, - is_writable: false, - }, - ], - Ok(()), - ); - let expected_balance = rent_exempt_reserve + initial_lamports - withdraw_lamports; - assert_eq!(accounts[0].lamports(), expected_balance); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - clock.epoch += 1; - transaction_accounts[2] = (clock::id(), create_account_shared_data_for_test(&clock)); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - delegate_instruction_accounts.clone(), - Ok(()), - ); - assert_eq!( - stake_from(&accounts[0]).unwrap().delegation.stake, - accounts[0].lamports() - rent_exempt_reserve, - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - clock.epoch += 1; - transaction_accounts[2] = (clock::id(), create_account_shared_data_for_test(&clock)); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts.clone(), - deactivate_instruction_accounts, - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // Out of band deposit - transaction_accounts[0] - .1 - .checked_add_lamports(withdraw_lamports) - .unwrap(); - - clock.epoch += 1; - transaction_accounts[2] = (clock::id(), create_account_shared_data_for_test(&clock)); - let accounts = process_instruction( 
- Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts, - delegate_instruction_accounts, - Ok(()), - ); - assert_eq!( - stake_from(&accounts[0]).unwrap().delegation.stake, - accounts[0].lamports() - rent_exempt_reserve, - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split(feature_set: Arc) { - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let stake_address = solana_pubkey::new_rand(); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = minimum_delegation * 2; - let split_to_address = solana_pubkey::new_rand(); - let split_to_account = AccountSharedData::new_data_with_space( - 0, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let mut transaction_accounts = vec![ - (stake_address, AccountSharedData::default()), - (split_to_address, split_to_account.clone()), - ( - rent::id(), - create_account_shared_data_for_test(&Rent { - lamports_per_byte_year: 0, - ..Rent::default() - }), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: split_to_address, - is_signer: false, - is_writable: true, - }, - ]; - - let feature_set = Arc::new(feature_set); - - for state in [ - StakeStateV2::Initialized(Meta::auto(&stake_address)), - just_stake(Meta::auto(&stake_address), stake_lamports), - ] { - let stake_account = 
AccountSharedData::new_data_with_space( - stake_lamports, - &state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let expected_active_stake = get_active_stake_for_tests( - &[stake_account.clone(), split_to_account.clone()], - &clock, - &stake_history, - ); - transaction_accounts[0] = (stake_address, stake_account); - - // should fail, split more than available - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports + 1)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // should pass - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - // no lamport leakage - assert_eq!( - accounts[0].lamports() + accounts[1].lamports(), - stake_lamports - ); - - // no deactivated stake - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - - assert_eq!(from(&accounts[0]).unwrap(), from(&accounts[1]).unwrap()); - match state { - StakeStateV2::Initialized(_meta) => { - assert_eq!(from(&accounts[0]).unwrap(), state); - } - StakeStateV2::Stake(_meta, _stake, _) => { - let stake_0 = from(&accounts[0]).unwrap().stake(); - assert_eq!(stake_0.unwrap().delegation.stake, stake_lamports / 2); - } - _ => unreachable!(), - } - } - - // should fail, fake owner of destination - let split_to_account = AccountSharedData::new_data_with_space( - 0, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &solana_pubkey::new_rand(), - ) - .unwrap(); - transaction_accounts[1] = (split_to_address, split_to_account); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts, - instruction_accounts, - Err(InstructionError::IncorrectProgramId), - ); - } - - 
#[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_withdraw_stake(feature_set: Arc) { - let recipient_address = solana_pubkey::new_rand(); - let authority_address = solana_pubkey::new_rand(); - let custodian_address = solana_pubkey::new_rand(); - let stake_address = solana_pubkey::new_rand(); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = minimum_delegation; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let vote_address = solana_pubkey::new_rand(); - let mut vote_account = - vote_state::create_account(&vote_address, &solana_pubkey::new_rand(), 0, 100); - vote_account - .set_state(&VoteStateVersions::new_current(VoteState::default())) - .unwrap(); - #[allow(deprecated)] - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (vote_address, vote_account), - (recipient_address, AccountSharedData::default()), - ( - authority_address, - AccountSharedData::new(42, 0, &system_program::id()), - ), - (custodian_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - rent::id(), - create_account_shared_data_for_test(&Rent::free()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: recipient_address, - is_signer: false, - is_writable: true, - }, - 
AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: false, - }, - ]; - - // should fail, no signer - instruction_accounts[4].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - instruction_accounts[4].is_signer = true; - - // should pass, signed keyed account and uninitialized - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - assert_eq!(accounts[0].lamports(), 0); - assert_eq!(from(&accounts[0]).unwrap(), StakeStateV2::Uninitialized); - - // initialize stake - let lockup = Lockup { - unix_timestamp: 0, - epoch: 0, - custodian: custodian_address, - }; - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Initialize( - Authorized::auto(&stake_address), - lockup, - )) - .unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: rent::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // should fail, signed keyed account and locked up, more than available - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports + 1)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // Stake some lamports (available lamports for withdrawals will reduce to zero) - 
#[allow(deprecated)] - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // simulate rewards - transaction_accounts[0].1.checked_add_lamports(10).unwrap(); - - // withdrawal before deactivate works for rewards amount - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(10)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - - // withdrawal of rewards fails if not in excess of stake - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(11)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // deactivate the stake before withdrawal - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // simulate time passing - let clock = Clock { - epoch: 100, - ..Clock::default() - }; - transaction_accounts[5] = (clock::id(), create_account_shared_data_for_test(&clock)); - - // Try to withdraw 
more than what's available - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports + 11)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // Try to withdraw all lamports - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports + 10)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - assert_eq!(accounts[0].lamports(), 0); - assert_eq!(from(&accounts[0]).unwrap(), StakeStateV2::Uninitialized); - - // overflow - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_account = AccountSharedData::new_data_with_space( - 1_000_000_000, - &StakeStateV2::Initialized(Meta { - rent_exempt_reserve, - authorized: Authorized { - staker: authority_address, - withdrawer: authority_address, - }, - lockup: Lockup::default(), - }), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - transaction_accounts[0] = (stake_address, stake_account.clone()); - transaction_accounts[2] = (recipient_address, stake_account); - instruction_accounts[4].pubkey = authority_address; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(u64::MAX - 10)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // should fail, invalid state - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::RewardsPool, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - transaction_accounts[0] = (stake_address, stake_account); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports)).unwrap(), - transaction_accounts, - instruction_accounts, - Err(InstructionError::InvalidAccountData), - ); - } - - 
#[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_withdraw_stake_before_warmup(feature_set: Arc) { - let recipient_address = solana_pubkey::new_rand(); - let stake_address = solana_pubkey::new_rand(); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = minimum_delegation; - let total_lamports = stake_lamports + 33; - let stake_account = AccountSharedData::new_data_with_space( - total_lamports, - &StakeStateV2::Initialized(Meta::auto(&stake_address)), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let vote_address = solana_pubkey::new_rand(); - let mut vote_account = - vote_state::create_account(&vote_address, &solana_pubkey::new_rand(), 0, 100); - vote_account - .set_state(&VoteStateVersions::new_current(VoteState::default())) - .unwrap(); - let mut clock = Clock { - epoch: 16, - ..Clock::default() - }; - #[allow(deprecated)] - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (vote_address, vote_account), - (recipient_address, AccountSharedData::default()), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: recipient_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: 
stake_address, - is_signer: true, - is_writable: false, - }, - ]; - - // Stake some lamports (available lamports for withdrawals will reduce to zero) - #[allow(deprecated)] - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // Try to withdraw stake - let stake_history = create_stake_history_from_delegations( - None, - 0..clock.epoch, - &[stake_from(&accounts[0]).unwrap().delegation], - None, - ); - transaction_accounts[4] = ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ); - clock.epoch = 0; - transaction_accounts[3] = (clock::id(), create_account_shared_data_for_test(&clock)); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw( - total_lamports - stake_lamports + 1, - )) - .unwrap(), - transaction_accounts, - instruction_accounts, - Err(InstructionError::InsufficientFunds), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_withdraw_lockup(feature_set: Arc) { - let recipient_address = solana_pubkey::new_rand(); - let custodian_address = solana_pubkey::new_rand(); - let stake_address = solana_pubkey::new_rand(); - let total_lamports = 100; - let mut meta = Meta { - lockup: Lockup { - unix_timestamp: 0, - epoch: 1, - custodian: custodian_address, - }, - 
..Meta::auto(&stake_address) - }; - let stake_account = AccountSharedData::new_data_with_space( - total_lamports, - &StakeStateV2::Initialized(meta), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let mut clock = Clock::default(); - let mut transaction_accounts = vec![ - (stake_address, stake_account.clone()), - (recipient_address, AccountSharedData::default()), - (custodian_address, AccountSharedData::default()), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: recipient_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: false, - }, - ]; - - // should fail, lockup is still in force - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(total_lamports)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(StakeError::LockupInForce.into()), - ); - - // should pass - instruction_accounts.push(AccountMeta { - pubkey: custodian_address, - is_signer: true, - is_writable: false, - }); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(total_lamports)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - assert_eq!(from(&accounts[0]).unwrap(), StakeStateV2::Uninitialized); - - // should pass, custodian is the same as the withdraw authority - instruction_accounts[5].pubkey = stake_address; - 
meta.lockup.custodian = stake_address; - let stake_account_self_as_custodian = AccountSharedData::new_data_with_space( - total_lamports, - &StakeStateV2::Initialized(meta), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - transaction_accounts[0] = (stake_address, stake_account_self_as_custodian); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(total_lamports)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - assert_eq!(from(&accounts[0]).unwrap(), StakeStateV2::Uninitialized); - transaction_accounts[0] = (stake_address, stake_account); - - // should pass, lockup has expired - instruction_accounts.pop(); - clock.epoch += 1; - transaction_accounts[3] = (clock::id(), create_account_shared_data_for_test(&clock)); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(total_lamports)).unwrap(), - transaction_accounts, - instruction_accounts, - Ok(()), - ); - assert_eq!(from(&accounts[0]).unwrap(), StakeStateV2::Uninitialized); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_withdraw_rent_exempt(feature_set: Arc) { - let recipient_address = solana_pubkey::new_rand(); - let custodian_address = solana_pubkey::new_rand(); - let stake_address = solana_pubkey::new_rand(); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = 7 * minimum_delegation; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports + rent_exempt_reserve, - &StakeStateV2::Initialized(Meta { - rent_exempt_reserve, - ..Meta::auto(&stake_address) - }), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - 
let transaction_accounts = vec![ - (stake_address, stake_account), - (recipient_address, AccountSharedData::default()), - (custodian_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: recipient_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: false, - }, - ]; - - // should pass, withdrawing initialized account down to minimum balance - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - - // should fail, withdrawal that would leave less than rent-exempt reserve - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(stake_lamports + 1)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // should pass, withdrawal of complete account - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw( - stake_lamports + rent_exempt_reserve, - )) - .unwrap(), - transaction_accounts, - instruction_accounts, - Ok(()), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_deactivate(feature_set: Arc) { - let stake_address = 
solana_pubkey::new_rand(); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = minimum_delegation; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Initialized(Meta::auto(&stake_address)), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let vote_address = solana_pubkey::new_rand(); - let mut vote_account = - vote_state::create_account(&vote_address, &solana_pubkey::new_rand(), 0, 100); - vote_account - .set_state(&VoteStateVersions::new_current(VoteState::default())) - .unwrap(); - #[allow(deprecated)] - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (vote_address, vote_account), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - ]; - - // should fail, not signed - instruction_accounts[0].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - instruction_accounts[0].is_signer = true; - - // should fail, not staked yet - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - - // Staking - 
#[allow(deprecated)] - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // should pass - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // should fail, only works once - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts, - instruction_accounts, - Err(StakeError::AlreadyDeactivated.into()), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_set_lockup(feature_set: Arc) { - let custodian_address = solana_pubkey::new_rand(); - let authorized_address = solana_pubkey::new_rand(); - let stake_address = solana_pubkey::new_rand(); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = minimum_delegation; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let vote_address = solana_pubkey::new_rand(); - let mut 
vote_account = - vote_state::create_account(&vote_address, &solana_pubkey::new_rand(), 0, 100); - vote_account - .set_state(&VoteStateVersions::new_current(VoteState::default())) - .unwrap(); - let instruction_data = serialize(&StakeInstruction::SetLockup(LockupArgs { - unix_timestamp: Some(1), - epoch: Some(1), - custodian: Some(custodian_address), - })) - .unwrap(); - #[allow(deprecated)] - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (vote_address, vote_account), - (authorized_address, AccountSharedData::default()), - (custodian_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - rent::id(), - create_account_shared_data_for_test(&Rent::free()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: custodian_address, - is_signer: true, - is_writable: false, - }, - ]; - - // should fail, wrong state - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - - // initialize stake - let lockup = Lockup { - unix_timestamp: 1, - epoch: 1, - custodian: custodian_address, - }; - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Initialize( - Authorized::auto(&stake_address), - lockup, - )) - .unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, 
- }, - AccountMeta { - pubkey: rent::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // should fail, not signed - instruction_accounts[2].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - instruction_accounts[2].is_signer = true; - - // should pass - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - - // Staking - #[allow(deprecated)] - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - // should fail, not signed - instruction_accounts[2].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - instruction_accounts[2].is_signer = true; - - // should pass - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - - // Lockup in force - let instruction_data = serialize(&StakeInstruction::SetLockup(LockupArgs { - unix_timestamp: Some(2), 
- epoch: None, - custodian: None, - })) - .unwrap(); - - // should fail, authorized withdrawer cannot change it - instruction_accounts[0].is_signer = true; - instruction_accounts[2].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - instruction_accounts[0].is_signer = false; - instruction_accounts[2].is_signer = true; - - // should pass, custodian can change it - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - - // Lockup expired - let clock = Clock { - unix_timestamp: UnixTimestamp::MAX, - epoch: Epoch::MAX, - ..Clock::default() - }; - transaction_accounts[4] = (clock::id(), create_account_shared_data_for_test(&clock)); - - // should fail, custodian cannot change it - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - - // should pass, authorized withdrawer can change it - instruction_accounts[0].is_signer = true; - instruction_accounts[2].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - - // Change authorized withdrawer - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Authorize( - authorized_address, - StakeAuthorize::Withdrawer, - )) - .unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_address, - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = 
(stake_address, accounts[0].clone()); - - // should fail, previous authorized withdrawer cannot change the lockup anymore - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - transaction_accounts, - instruction_accounts, - Err(InstructionError::MissingRequiredSignature), - ); - } - - /// Ensure that `initialize()` respects the minimum balance requirements - /// - Assert 1: accounts with a balance equal-to the rent exemption initialize OK - /// - Assert 2: accounts with a balance less-than the rent exemption do not initialize - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_initialize_minimum_balance(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_address = solana_pubkey::new_rand(); - let instruction_data = serialize(&StakeInstruction::Initialize( - Authorized::auto(&stake_address), - Lockup::default(), - )) - .unwrap(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: rent::id(), - is_signer: false, - is_writable: false, - }, - ]; - for (lamports, expected_result) in [ - (rent_exempt_reserve, Ok(())), - ( - rent_exempt_reserve - 1, - Err(InstructionError::InsufficientFunds), - ), - ] { - let stake_account = AccountSharedData::new(lamports, StakeStateV2::size_of(), &id()); - process_instruction( - Arc::clone(&feature_set), - &instruction_data, - vec![ - (stake_address, stake_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ], - instruction_accounts.clone(), - expected_result, - ); - } - } - - /// Ensure that `delegate()` respects the minimum delegation requirements - /// - Assert 1: delegating an amount equal-to the minimum succeeds - /// - Assert 2: delegating an amount less-than the minimum fails - /// Also test both asserts above over both 
StakeStateV2::{Initialized and Stake}, since the logic - /// is slightly different for the variants. - /// - /// NOTE: Even though new stake accounts must have a minimum balance that is at least - /// the minimum delegation (plus rent exempt reserve), the old behavior allowed - /// withdrawing below the minimum delegation, then re-delegating successfully (see - /// `test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation()` for - /// more information.) - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_delegate_minimum_stake_delegation(feature_set: Arc) { - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_address = solana_pubkey::new_rand(); - let meta = Meta { - rent_exempt_reserve, - ..Meta::auto(&stake_address) - }; - let vote_address = solana_pubkey::new_rand(); - let vote_account = - vote_state::create_account(&vote_address, &solana_pubkey::new_rand(), 0, 100); - #[allow(deprecated)] - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - ]; - for (stake_delegation, expected_result) in &[ - (minimum_delegation, Ok(())), - ( - minimum_delegation - 1, - Err(StakeError::InsufficientDelegation), - ), - ] { - for stake_state in &[ - StakeStateV2::Initialized(meta), - just_stake(meta, *stake_delegation), - ] { - let 
stake_account = AccountSharedData::new_data_with_space( - stake_delegation + rent_exempt_reserve, - stake_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - #[allow(deprecated)] - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - vec![ - (stake_address, stake_account), - (vote_address, vote_account.clone()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ], - instruction_accounts.clone(), - expected_result.clone().map_err(|e| e.into()), - ); - } - } - } - - /// Ensure that `split()` respects the minimum delegation requirements. This applies to - /// both the source and destination acounts. Thus, we have four permutations possible based on - /// if each account's post-split delegation is equal-to (EQ) or less-than (LT) the minimum: - /// - /// source | dest | result - /// --------+------+-------- - /// EQ | EQ | Ok - /// EQ | LT | Err - /// LT | EQ | Err - /// LT | LT | Err - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_minimum_stake_delegation(feature_set: Arc) { - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let source_address = Pubkey::new_unique(); - let source_meta = Meta { - rent_exempt_reserve, - 
..Meta::auto(&source_address) - }; - let dest_address = Pubkey::new_unique(); - let dest_account = AccountSharedData::new_data_with_space( - rent_exempt_reserve, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: source_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: dest_address, - is_signer: false, - is_writable: true, - }, - ]; - for (source_delegation, split_amount, expected_result) in [ - (minimum_delegation * 2, minimum_delegation, Ok(())), - ( - minimum_delegation * 2, - minimum_delegation - 1, - Err(InstructionError::InsufficientFunds), - ), - ( - (minimum_delegation * 2) - 1, - minimum_delegation, - Err(InstructionError::InsufficientFunds), - ), - ( - (minimum_delegation - 1) * 2, - minimum_delegation - 1, - Err(InstructionError::InsufficientFunds), - ), - ] { - let source_account = AccountSharedData::new_data_with_space( - source_delegation + rent_exempt_reserve, - &just_stake(source_meta, source_delegation), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let expected_active_stake = get_active_stake_for_tests( - &[source_account.clone(), dest_account.clone()], - &clock, - &stake_history, - ); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(split_amount)).unwrap(), - vec![ - (source_address, source_account), - (dest_address, dest_account.clone()), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ], - instruction_accounts.clone(), - expected_result.clone(), - ); - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - } - } - - /// Ensure that splitting 
the full amount from an account respects the minimum delegation - /// requirements. This ensures that we are future-proofing/testing any raises to the minimum - /// delegation. - /// - Assert 1: splitting the full amount from an account that has at least the minimum - /// delegation is OK - /// - Assert 2: splitting the full amount from an account that has less than the minimum - /// delegation is not OK - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_full_amount_minimum_stake_delegation(feature_set: Arc) { - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let source_address = Pubkey::new_unique(); - let source_meta = Meta { - rent_exempt_reserve, - ..Meta::auto(&source_address) - }; - let dest_address = Pubkey::new_unique(); - let dest_account = AccountSharedData::new_data_with_space( - 0, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: source_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: dest_address, - is_signer: false, - is_writable: true, - }, - ]; - for (reserve, expected_result) in [ - (rent_exempt_reserve, Ok(())), - ( - rent_exempt_reserve - 1, - Err(InstructionError::InsufficientFunds), - ), - ] { - for (stake_delegation, source_stake_state) in &[ - (0, StakeStateV2::Initialized(source_meta)), - ( - minimum_delegation, - just_stake(source_meta, minimum_delegation), - ), - ] { - let source_account = AccountSharedData::new_data_with_space( - stake_delegation + reserve, - 
source_stake_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let expected_active_stake = get_active_stake_for_tests( - &[source_account.clone(), dest_account.clone()], - &clock, - &stake_history, - ); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(source_account.lamports())).unwrap(), - vec![ - (source_address, source_account), - (dest_address, dest_account.clone()), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ], - instruction_accounts.clone(), - expected_result.clone(), - ); - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - } - } - } - - /// Ensure that `split()` correctly handles prefunded destination accounts from - /// initialized stakes. When a destination account already has funds, ensure - /// the minimum split amount reduces accordingly. 
- #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_initialized_split_destination_minimum_balance(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let source_address = Pubkey::new_unique(); - let destination_address = Pubkey::new_unique(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: source_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: destination_address, - is_signer: false, - is_writable: true, - }, - ]; - for (destination_starting_balance, split_amount, expected_result) in [ - // split amount must be non zero - ( - rent_exempt_reserve, - 0, - Err(InstructionError::InsufficientFunds), - ), - // any split amount is OK when destination account is already fully funded - (rent_exempt_reserve, 1, Ok(())), - // if destination is only short by 1 lamport, then split amount can be 1 lamport - (rent_exempt_reserve - 1, 1, Ok(())), - // destination short by 2 lamports, then 1 isn't enough (non-zero split amount) - ( - rent_exempt_reserve - 2, - 1, - Err(InstructionError::InsufficientFunds), - ), - // destination has smallest non-zero balance, so can split the minimum balance - // requirements minus what destination already has - (1, rent_exempt_reserve - 1, Ok(())), - // destination has smallest non-zero balance, but cannot split less than the minimum - // balance requirements minus what destination already has - ( - 1, - rent_exempt_reserve - 2, - Err(InstructionError::InsufficientFunds), - ), - // destination has zero lamports, so split must be at least rent exempt reserve - (0, rent_exempt_reserve, Ok(())), - // destination has zero lamports, but split amount is less than rent exempt reserve - ( - 0, - rent_exempt_reserve - 1, - Err(InstructionError::InsufficientFunds), - ), - ] { - // Set the source's starting balance to something large to ensure its 
post-split - // balance meets all the requirements - let source_balance = rent_exempt_reserve + split_amount; - let source_meta = Meta { - rent_exempt_reserve, - ..Meta::auto(&source_address) - }; - let source_account = AccountSharedData::new_data_with_space( - source_balance, - &StakeStateV2::Initialized(source_meta), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let destination_account = AccountSharedData::new_data_with_space( - destination_starting_balance, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(split_amount)).unwrap(), - vec![ - (source_address, source_account), - (destination_address, destination_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ], - instruction_accounts.clone(), - expected_result.clone(), - ); - } - } - - /// Ensure that `split()` correctly handles prefunded destination accounts from staked stakes. - /// When a destination account already has funds, ensure the minimum split amount reduces - /// accordingly. 
- #[test_case(feature_set_no_minimum_delegation(), &[Ok(()), Ok(())]; "old_behavior")] - #[test_case(feature_set_all_enabled(), &[Err(StakeError::InsufficientDelegation.into()), Err(StakeError::InsufficientDelegation.into())]; "all_enabled")] - fn test_staked_split_destination_minimum_balance( - feature_set: Arc, - expected_results: &[Result<(), InstructionError>], - ) { - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let source_address = Pubkey::new_unique(); - let destination_address = Pubkey::new_unique(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: source_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: destination_address, - is_signer: false, - is_writable: true, - }, - ]; - for (destination_starting_balance, split_amount, expected_result) in [ - // split amount must be non zero - ( - rent_exempt_reserve + minimum_delegation, - 0, - Err(InstructionError::InsufficientFunds), - ), - // destination is fully funded: - // - old behavior: any split amount is OK - // - new behavior: split amount must be at least the minimum delegation - ( - rent_exempt_reserve + minimum_delegation, - 1, - expected_results[0].clone(), - ), - // if destination is only short by 1 lamport, then... 
- // - old behavior: split amount can be 1 lamport - // - new behavior: split amount must be at least the minimum delegation - ( - rent_exempt_reserve + minimum_delegation - 1, - 1, - expected_results[1].clone(), - ), - // destination short by 2 lamports, so 1 isn't enough (non-zero split amount) - ( - rent_exempt_reserve + minimum_delegation - 2, - 1, - Err(InstructionError::InsufficientFunds), - ), - // destination is rent exempt, so split enough for minimum delegation - (rent_exempt_reserve, minimum_delegation, Ok(())), - // destination is rent exempt, but split amount less than minimum delegation - ( - rent_exempt_reserve, - minimum_delegation.saturating_sub(1), // when minimum is 0, this blows up! - Err(InstructionError::InsufficientFunds), - ), - // destination is not rent exempt, so any split amount fails, including enough for rent - // and minimum delegation - ( - rent_exempt_reserve - 1, - minimum_delegation + 1, - Err(InstructionError::InsufficientFunds), - ), - // destination is not rent exempt, but split amount only for minimum delegation - ( - rent_exempt_reserve - 1, - minimum_delegation, - Err(InstructionError::InsufficientFunds), - ), - // destination is not rent exempt, so any split amount fails, including case where - // destination has smallest non-zero balance - ( - 1, - rent_exempt_reserve + minimum_delegation - 1, - Err(InstructionError::InsufficientFunds), - ), - // destination has smallest non-zero balance, but cannot split less than the minimum - // balance requirements minus what destination already has - ( - 1, - rent_exempt_reserve + minimum_delegation - 2, - Err(InstructionError::InsufficientFunds), - ), - // destination has zero lamports, so any split amount fails, including at least rent - // exempt reserve plus minimum delegation - ( - 0, - rent_exempt_reserve + minimum_delegation, - Err(InstructionError::InsufficientFunds), - ), - // destination has zero lamports, but split amount is less than rent exempt reserve - // plus minimum 
delegation - ( - 0, - rent_exempt_reserve + minimum_delegation - 1, - Err(InstructionError::InsufficientFunds), - ), - ] { - // Set the source's starting balance to something large to ensure its post-split - // balance meets all the requirements - let source_balance = rent_exempt_reserve + minimum_delegation + split_amount; - let source_meta = Meta { - rent_exempt_reserve, - ..Meta::auto(&source_address) - }; - let source_stake_delegation = source_balance - rent_exempt_reserve; - let source_account = AccountSharedData::new_data_with_space( - source_balance, - &just_stake(source_meta, source_stake_delegation), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let destination_account = AccountSharedData::new_data_with_space( - destination_starting_balance, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let expected_active_stake = get_active_stake_for_tests( - &[source_account.clone(), destination_account.clone()], - &clock, - &stake_history, - ); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(split_amount)).unwrap(), - vec![ - (source_address, source_account.clone()), - (destination_address, destination_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ], - instruction_accounts.clone(), - expected_result.clone(), - ); - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - // For the expected OK cases, when the source's StakeStateV2 is Stake, then the - // destination's StakeStateV2 *must* also end up as Stake as well. Additionally, - // check to ensure the destination's delegation amount is correct. 
If the - // destination is already rent exempt, then the destination's stake delegation - // *must* equal the split amount. Otherwise, the split amount must first be used to - // make the destination rent exempt, and then the leftover lamports are delegated. - if expected_result.is_ok() { - assert_matches!(accounts[0].state().unwrap(), StakeStateV2::Stake(_, _, _)); - if let StakeStateV2::Stake(_, destination_stake, _) = accounts[1].state().unwrap() { - let destination_initial_rent_deficit = - rent_exempt_reserve.saturating_sub(destination_starting_balance); - let expected_destination_stake_delegation = - split_amount - destination_initial_rent_deficit; - assert_eq!( - expected_destination_stake_delegation, - destination_stake.delegation.stake - ); - assert!(destination_stake.delegation.stake >= minimum_delegation,); - } else { - panic!( - "destination state must be StakeStake::Stake after successful split when \ - source is also StakeStateV2::Stake!" - ); - } - } - } - } - - /// Ensure that `withdraw()` respects the minimum delegation requirements - /// - Assert 1: withdrawing so remaining stake is equal-to the minimum is OK - /// - Assert 2: withdrawing so remaining stake is less-than the minimum is not OK - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_withdraw_minimum_stake_delegation(feature_set: Arc) { - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_address = solana_pubkey::new_rand(); - let meta = Meta { - rent_exempt_reserve, - ..Meta::auto(&stake_address) - }; - let recipient_address = solana_pubkey::new_rand(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - 
AccountMeta { - pubkey: recipient_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: false, - }, - ]; - let starting_stake_delegation = minimum_delegation; - for (ending_stake_delegation, expected_result) in [ - (minimum_delegation, Ok(())), - ( - minimum_delegation - 1, - Err(InstructionError::InsufficientFunds), - ), - ] { - for (stake_delegation, stake_state) in &[ - (0, StakeStateV2::Initialized(meta)), - (minimum_delegation, just_stake(meta, minimum_delegation)), - ] { - let rewards_balance = 123; - let stake_account = AccountSharedData::new_data_with_space( - stake_delegation + rent_exempt_reserve + rewards_balance, - stake_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let withdraw_amount = - (starting_stake_delegation + rewards_balance) - ending_stake_delegation; - #[allow(deprecated)] - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(withdraw_amount)).unwrap(), - vec![ - (stake_address, stake_account), - ( - recipient_address, - AccountSharedData::new(rent_exempt_reserve, 0, &system_program::id()), - ), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - rent::id(), - create_account_shared_data_for_test(&Rent::free()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ], - instruction_accounts.clone(), - expected_result.clone(), - ); - } - } - } - - /// The stake program's old behavior allowed delegations below the minimum stake delegation - /// (see also 
`test_delegate_minimum_stake_delegation()`). This was not the desired behavior, - /// and has been fixed in the new behavior. This test ensures the behavior is not changed - /// inadvertently. - /// - /// This test: - /// 1. Initialises a stake account (with sufficient balance for both rent and minimum delegation) - /// 2. Delegates the minimum amount - /// 3. Deactives the delegation - /// 4. Withdraws from the account such that the ending balance is *below* rent + minimum delegation - /// 5. Re-delegates, now with less than the minimum delegation, but it still succeeds - #[test] - fn test_behavior_withdrawal_then_redelegate_with_less_than_minimum_stake_delegation() { - let feature_set = feature_set_all_enabled(); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_address = solana_pubkey::new_rand(); - let stake_account = AccountSharedData::new( - rent_exempt_reserve + minimum_delegation, - StakeStateV2::size_of(), - &id(), - ); - let vote_address = solana_pubkey::new_rand(); - let vote_account = - vote_state::create_account(&vote_address, &solana_pubkey::new_rand(), 0, 100); - let recipient_address = solana_pubkey::new_rand(); - let mut clock = Clock::default(); - #[allow(deprecated)] - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (vote_address, vote_account), - ( - recipient_address, - AccountSharedData::new(rent_exempt_reserve, 0, &system_program::id()), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - 
(rent::id(), create_account_shared_data_for_test(&rent)), - ]; - #[allow(deprecated)] - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - ]; - - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Initialize( - Authorized::auto(&stake_address), - Lockup::default(), - )) - .unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: rent::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - transaction_accounts[1] = (vote_address, accounts[1].clone()); - - clock.epoch += 1; - transaction_accounts[3] = (clock::id(), create_account_shared_data_for_test(&clock)); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Deactivate).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - clock.epoch += 1; - transaction_accounts[3] = (clock::id(), 
create_account_shared_data_for_test(&clock)); - let withdraw_amount = - accounts[0].lamports() - (rent_exempt_reserve + minimum_delegation - 1); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Withdraw(withdraw_amount)).unwrap(), - transaction_accounts.clone(), - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: recipient_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: false, - }, - ], - Ok(()), - ); - transaction_accounts[0] = (stake_address, accounts[0].clone()); - - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - transaction_accounts, - instruction_accounts, - Err(StakeError::InsufficientDelegation.into()), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_source_uninitialized(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = (rent_exempt_reserve + minimum_delegation) * 2; - let stake_address = solana_pubkey::new_rand(); - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let split_to_address = solana_pubkey::new_rand(); - let split_to_account = AccountSharedData::new_data_with_space( - 0, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) 
- .unwrap(); - let transaction_accounts = vec![ - (stake_address, stake_account), - (split_to_address, split_to_account), - ]; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - ]; - - // splitting an uninitialized account where the destination is the same as the source - { - // splitting should work when... - // - when split amount is the full balance - // - when split amount is zero - // - when split amount is non-zero and less than the full balance - // - // and splitting should fail when the split amount is greater than the balance - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(0)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports + 1)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - } - - // this should work - instruction_accounts[1].pubkey = split_to_address; - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - assert_eq!(accounts[0].lamports(), accounts[1].lamports()); - - // no signers should fail - instruction_accounts[0].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - 
&serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts, - instruction_accounts, - Err(InstructionError::MissingRequiredSignature), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_split_not_uninitialized(feature_set: Arc) { - let stake_lamports = 42; - let stake_address = solana_pubkey::new_rand(); - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &just_stake(Meta::auto(&stake_address), stake_lamports), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let split_to_address = solana_pubkey::new_rand(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - ]; - - for split_to_state in &[ - StakeStateV2::Initialized(Meta::default()), - StakeStateV2::Stake(Meta::default(), Stake::default(), StakeFlags::default()), - StakeStateV2::RewardsPool, - ] { - let split_to_account = AccountSharedData::new_data_with_space( - 0, - split_to_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - vec![ - (stake_address, stake_account.clone()), - (split_to_address, split_to_account), - ], - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_more_than_staked(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let minimum_delegation = crate::get_minimum_delegation( - feature_set - 
.is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = (rent_exempt_reserve + minimum_delegation) * 2; - let stake_address = solana_pubkey::new_rand(); - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &just_stake( - Meta { - rent_exempt_reserve, - ..Meta::auto(&stake_address) - }, - stake_lamports / 2 - 1, - ), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let split_to_address = solana_pubkey::new_rand(); - let split_to_account = AccountSharedData::new_data_with_space( - rent_exempt_reserve, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let transaction_accounts = vec![ - (stake_address, stake_account), - (split_to_address, split_to_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - ( - clock::id(), - create_account_shared_data_for_test(&Clock { - epoch: current_epoch, - ..Clock::default() - }), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: split_to_address, - is_signer: false, - is_writable: true, - }, - ]; - - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts, - instruction_accounts, - Err(StakeError::InsufficientDelegation.into()), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_with_rent(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: 
current_epoch, - ..Clock::default() - }; - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_address = solana_pubkey::new_rand(); - let split_to_address = solana_pubkey::new_rand(); - let split_to_account = AccountSharedData::new_data_with_space( - 0, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: split_to_address, - is_signer: false, - is_writable: true, - }, - ]; - let meta = Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve, - ..Meta::default() - }; - - // test splitting both an Initialized stake and a Staked stake - for (minimum_balance, state) in &[ - (rent_exempt_reserve, StakeStateV2::Initialized(meta)), - ( - rent_exempt_reserve + minimum_delegation, - just_stake(meta, minimum_delegation * 2 + rent_exempt_reserve), - ), - ] { - let stake_lamports = minimum_balance * 2; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let expected_active_stake = get_active_stake_for_tests( - &[stake_account.clone(), split_to_account.clone()], - &clock, - &stake_history, - ); - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (split_to_address, split_to_account.clone()), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - - // not enough to make a non-zero stake account - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(minimum_balance - 
1)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // doesn't leave enough for initial stake to be non-zero - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split( - stake_lamports - minimum_balance + 1, - )) - .unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // split account already has enough lamports - transaction_accounts[1].1.set_lamports(*minimum_balance); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports - minimum_balance)).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Ok(()), - ); - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - - // verify no stake leakage in the case of a stake - if let StakeStateV2::Stake(meta, stake, stake_flags) = state { - assert_eq!( - accounts[1].state(), - Ok(StakeStateV2::Stake( - *meta, - Stake { - delegation: Delegation { - stake: stake_lamports - minimum_balance, - ..stake.delegation - }, - ..*stake - }, - *stake_flags, - )) - ); - assert_eq!(accounts[0].lamports(), *minimum_balance,); - assert_eq!(accounts[1].lamports(), stake_lamports,); - } - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_to_account_with_rent_exempt_reserve(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = 
(rent_exempt_reserve + minimum_delegation) * 2; - let stake_address = solana_pubkey::new_rand(); - let meta = Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve, - ..Meta::default() - }; - let state = just_stake(meta, stake_lamports - rent_exempt_reserve); - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let split_to_address = solana_pubkey::new_rand(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: split_to_address, - is_signer: false, - is_writable: true, - }, - ]; - - let transaction_accounts = |initial_balance: u64| -> Vec<(Pubkey, AccountSharedData)> { - let split_to_account = AccountSharedData::new_data_with_space( - initial_balance, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - vec![ - (stake_address, stake_account.clone()), - (split_to_address, split_to_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ] - }; - - // Test insufficient account prefunding, including empty and less than rent_exempt_reserve. 
- // The empty case is not covered in test_split, since that test uses a Meta with - // rent_exempt_reserve = 0 - let split_lamport_balances = vec![0, rent_exempt_reserve - 1]; - for initial_balance in split_lamport_balances { - let transaction_accounts = transaction_accounts(initial_balance); - // split more than available fails - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports + 1)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - // split to insufficiently funded dest fails - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - } - - // Test various account prefunding, including exactly rent_exempt_reserve, and more than - // rent_exempt_reserve - let split_lamport_balances = vec![ - rent_exempt_reserve, - rent_exempt_reserve + minimum_delegation - 1, - rent_exempt_reserve + minimum_delegation, - ]; - for initial_balance in split_lamport_balances { - let transaction_accounts = transaction_accounts(initial_balance); - let expected_active_stake = get_active_stake_for_tests( - &[ - transaction_accounts[0].1.clone(), - transaction_accounts[1].1.clone(), - ], - &clock, - &stake_history, - ); - - // split more than available fails - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports + 1)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // should work - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Ok(()), - ); - // no lamport leakage - assert_eq!( - accounts[0].lamports() + accounts[1].lamports(), - 
stake_lamports + initial_balance, - ); - // no deactivated stake - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - - if let StakeStateV2::Stake(meta, stake, stake_flags) = state { - let expected_stake = - stake_lamports / 2 - (rent_exempt_reserve.saturating_sub(initial_balance)); - assert_eq!( - Ok(StakeStateV2::Stake( - meta, - Stake { - delegation: Delegation { - stake: stake_lamports / 2 - - (rent_exempt_reserve.saturating_sub(initial_balance)), - ..stake.delegation - }, - ..stake - }, - stake_flags - )), - accounts[1].state(), - ); - assert_eq!( - accounts[1].lamports(), - expected_stake - + rent_exempt_reserve - + initial_balance.saturating_sub(rent_exempt_reserve), - ); - assert_eq!( - Ok(StakeStateV2::Stake( - meta, - Stake { - delegation: Delegation { - stake: stake_lamports / 2 - rent_exempt_reserve, - ..stake.delegation - }, - ..stake - }, - stake_flags, - )), - accounts[0].state(), - ); - } - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_from_larger_sized_account(feature_set: Arc) { - let rent = Rent::default(); - let source_larger_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of() + 100); - let split_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = (source_larger_rent_exempt_reserve + minimum_delegation) * 2; - let stake_address = solana_pubkey::new_rand(); - let meta = Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve: source_larger_rent_exempt_reserve, - ..Meta::default() - }; - let state = 
just_stake(meta, stake_lamports - source_larger_rent_exempt_reserve); - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &state, - StakeStateV2::size_of() + 100, - &id(), - ) - .unwrap(); - let split_to_address = solana_pubkey::new_rand(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: split_to_address, - is_signer: false, - is_writable: true, - }, - ]; - - let transaction_accounts = |initial_balance: u64| -> Vec<(Pubkey, AccountSharedData)> { - let split_to_account = AccountSharedData::new_data_with_space( - initial_balance, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - vec![ - (stake_address, stake_account.clone()), - (split_to_address, split_to_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ] - }; - - // Test insufficient account prefunding, including empty and less than rent_exempt_reserve - let split_lamport_balances = vec![0, split_rent_exempt_reserve - 1]; - for initial_balance in split_lamport_balances { - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts(initial_balance), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - } - - // Test various account prefunding, including exactly rent_exempt_reserve, and more than - // rent_exempt_reserve. 
The empty case is not covered in test_split, since that test uses a - // Meta with rent_exempt_reserve = 0 - let split_lamport_balances = vec![ - split_rent_exempt_reserve, - split_rent_exempt_reserve + minimum_delegation - 1, - split_rent_exempt_reserve + minimum_delegation, - ]; - for initial_balance in split_lamport_balances { - let transaction_accounts = transaction_accounts(initial_balance); - let expected_active_stake = get_active_stake_for_tests( - &[ - transaction_accounts[0].1.clone(), - transaction_accounts[1].1.clone(), - ], - &clock, - &stake_history, - ); - - // split more than available fails - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports + 1)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InsufficientFunds), - ); - - // should work - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports / 2)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - // no lamport leakage - assert_eq!( - accounts[0].lamports() + accounts[1].lamports(), - stake_lamports + initial_balance - ); - // no deactivated stake - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - - if let StakeStateV2::Stake(meta, stake, stake_flags) = state { - let expected_split_meta = Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve: split_rent_exempt_reserve, - ..Meta::default() - }; - let expected_stake = stake_lamports / 2 - - (split_rent_exempt_reserve.saturating_sub(initial_balance)); - - assert_eq!( - Ok(StakeStateV2::Stake( - expected_split_meta, - Stake { - delegation: Delegation { - stake: expected_stake, - ..stake.delegation - }, - ..stake - }, - stake_flags, - )), - accounts[1].state() - ); - assert_eq!( - accounts[1].lamports(), - expected_stake - + split_rent_exempt_reserve - + 
initial_balance.saturating_sub(split_rent_exempt_reserve) - ); - assert_eq!( - Ok(StakeStateV2::Stake( - meta, - Stake { - delegation: Delegation { - stake: stake_lamports / 2 - source_larger_rent_exempt_reserve, - ..stake.delegation - }, - ..stake - }, - stake_flags, - )), - accounts[0].state() - ); - } - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_from_smaller_sized_account(feature_set: Arc) { - let rent = Rent::default(); - let source_smaller_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let split_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of() + 100); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let stake_lamports = split_rent_exempt_reserve + 1; - let stake_address = solana_pubkey::new_rand(); - let meta = Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve: source_smaller_rent_exempt_reserve, - ..Meta::default() - }; - let state = just_stake(meta, stake_lamports - source_smaller_rent_exempt_reserve); - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let split_to_address = solana_pubkey::new_rand(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: split_to_address, - is_signer: false, - is_writable: true, - }, - ]; - - let split_amount = stake_lamports - (source_smaller_rent_exempt_reserve + 1); // Enough so that split stake is > 0 - let split_lamport_balances = vec![ - 0, - 1, - split_rent_exempt_reserve, - split_rent_exempt_reserve + 1, - ]; - for initial_balance in split_lamport_balances { - let split_to_account = AccountSharedData::new_data_with_space( - initial_balance, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of() + 100, - &id(), - ) - .unwrap(); 
- let transaction_accounts = vec![ - (stake_address, stake_account.clone()), - (split_to_address, split_to_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - ( - clock::id(), - create_account_shared_data_for_test(&Clock { - epoch: current_epoch, - ..Clock::default() - }), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - - // should always return error when splitting to larger account - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(split_amount)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - - // Splitting 100% of source should not make a difference - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports)).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_100_percent_of_source(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = rent_exempt_reserve + minimum_delegation; - let stake_address = solana_pubkey::new_rand(); - let meta = Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve, - ..Meta::default() - }; - let split_to_address = solana_pubkey::new_rand(); - let split_to_account = 
AccountSharedData::new_data_with_space( - 0, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: split_to_address, - is_signer: false, - is_writable: true, - }, - ]; - - // test splitting both an Initialized stake and a Staked stake - for state in &[ - StakeStateV2::Initialized(meta), - just_stake(meta, stake_lamports - rent_exempt_reserve), - ] { - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let expected_active_stake = get_active_stake_for_tests( - &[stake_account.clone(), split_to_account.clone()], - &clock, - &stake_history, - ); - let transaction_accounts = vec![ - (stake_address, stake_account), - (split_to_address, split_to_account.clone()), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - - // split 100% over to dest - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports)).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Ok(()), - ); - - // no lamport leakage - assert_eq!( - accounts[0].lamports() + accounts[1].lamports(), - stake_lamports - ); - // no deactivated stake - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - - match state { - StakeStateV2::Initialized(_) => { - assert_eq!(Ok(*state), accounts[1].state()); - assert_eq!(Ok(StakeStateV2::Uninitialized), accounts[0].state()); - } - StakeStateV2::Stake(meta, stake, stake_flags) => { - assert_eq!( - Ok(StakeStateV2::Stake( 
- *meta, - Stake { - delegation: Delegation { - stake: stake_lamports - rent_exempt_reserve, - ..stake.delegation - }, - ..*stake - }, - *stake_flags - )), - accounts[1].state() - ); - assert_eq!(Ok(StakeStateV2::Uninitialized), accounts[0].state()); - } - _ => unreachable!(), - } - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_100_percent_of_source_to_account_with_lamports(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = rent_exempt_reserve + minimum_delegation; - let stake_address = solana_pubkey::new_rand(); - let meta = Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve, - ..Meta::default() - }; - let state = just_stake(meta, stake_lamports - rent_exempt_reserve); - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let split_to_address = solana_pubkey::new_rand(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: split_to_address, - is_signer: false, - is_writable: true, - }, - ]; - - // Test various account prefunding, including empty, less than rent_exempt_reserve, exactly - // rent_exempt_reserve, and more than rent_exempt_reserve. 
Technically, the empty case is - // covered in test_split_100_percent_of_source, but included here as well for readability - let split_lamport_balances = vec![ - 0, - rent_exempt_reserve - 1, - rent_exempt_reserve, - rent_exempt_reserve + minimum_delegation - 1, - rent_exempt_reserve + minimum_delegation, - ]; - for initial_balance in split_lamport_balances { - let split_to_account = AccountSharedData::new_data_with_space( - initial_balance, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let expected_active_stake = get_active_stake_for_tests( - &[stake_account.clone(), split_to_account.clone()], - &clock, - &stake_history, - ); - let transaction_accounts = vec![ - (stake_address, stake_account.clone()), - (split_to_address, split_to_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - - // split 100% over to dest - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports)).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Ok(()), - ); - - // no lamport leakage - assert_eq!( - accounts[0].lamports() + accounts[1].lamports(), - stake_lamports + initial_balance - ); - // no deactivated stake - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - - if let StakeStateV2::Stake(meta, stake, stake_flags) = state { - assert_eq!( - Ok(StakeStateV2::Stake( - meta, - Stake { - delegation: Delegation { - stake: stake_lamports - rent_exempt_reserve, - ..stake.delegation - }, - ..stake - }, - stake_flags, - )), - accounts[1].state() - ); - assert_eq!(Ok(StakeStateV2::Uninitialized), accounts[0].state()); - } - } - } - - 
#[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_split_rent_exemptness(feature_set: Arc) { - let rent = Rent::default(); - let source_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of() + 100); - let split_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let stake_lamports = source_rent_exempt_reserve + minimum_delegation; - let stake_address = solana_pubkey::new_rand(); - let meta = Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve: source_rent_exempt_reserve, - ..Meta::default() - }; - let split_to_address = solana_pubkey::new_rand(); - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: split_to_address, - is_signer: false, - is_writable: true, - }, - ]; - - for state in &[ - StakeStateV2::Initialized(meta), - just_stake(meta, stake_lamports - source_rent_exempt_reserve), - ] { - // Test that splitting to a larger account fails - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let split_to_account = AccountSharedData::new_data_with_space( - 0, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of() + 10000, - &id(), - ) - .unwrap(); - let transaction_accounts = vec![ - (stake_address, stake_account), - (split_to_address, split_to_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), 
create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports)).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - - // Test that splitting from a larger account to a smaller one works. - // Split amount should not matter, assuming other fund criteria are met - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &state, - StakeStateV2::size_of() + 100, - &id(), - ) - .unwrap(); - let split_to_account = AccountSharedData::new_data_with_space( - 0, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let expected_active_stake = get_active_stake_for_tests( - &[stake_account.clone(), split_to_account.clone()], - &clock, - &stake_history, - ); - let transaction_accounts = vec![ - (stake_address, stake_account), - (split_to_address, split_to_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - ( - clock::id(), - create_account_shared_data_for_test(&Clock { - epoch: current_epoch, - ..Clock::default() - }), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(stake_lamports)).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Ok(()), - ); - assert_eq!(accounts[1].lamports(), stake_lamports); - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - - let expected_split_meta = Meta { - authorized: Authorized::auto(&stake_address), - rent_exempt_reserve: split_rent_exempt_reserve, - ..Meta::default() - }; - match state { - 
StakeStateV2::Initialized(_) => { - assert_eq!( - Ok(StakeStateV2::Initialized(expected_split_meta)), - accounts[1].state() - ); - assert_eq!(Ok(StakeStateV2::Uninitialized), accounts[0].state()); - } - StakeStateV2::Stake(_meta, stake, stake_flags) => { - // Expected stake should reflect original stake amount so that extra lamports - // from the rent_exempt_reserve inequality do not magically activate - let expected_stake = stake_lamports - source_rent_exempt_reserve; - - assert_eq!( - Ok(StakeStateV2::Stake( - expected_split_meta, - Stake { - delegation: Delegation { - stake: expected_stake, - ..stake.delegation - }, - ..*stake - }, - *stake_flags, - )), - accounts[1].state() - ); - assert_eq!( - accounts[1].lamports(), - expected_stake + source_rent_exempt_reserve, - ); - assert_eq!(Ok(StakeStateV2::Uninitialized), accounts[0].state()); - } - _ => unreachable!(), - } - } - } - - #[test_case(feature_set_all_enabled(), Err(InstructionError::InsufficientFunds); "all_enabled")] - fn test_split_require_rent_exempt_destination( - feature_set: Arc, - expected_result: Result<(), InstructionError>, - ) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - let clock = Clock { - epoch: current_epoch, - ..Clock::default() - }; - let minimum_delegation = crate::get_minimum_delegation( - feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - let delegation_amount = 3 * minimum_delegation; - let source_lamports = rent_exempt_reserve + delegation_amount; - let source_address = Pubkey::new_unique(); - let destination_address = Pubkey::new_unique(); - let meta = Meta { - authorized: Authorized::auto(&source_address), - rent_exempt_reserve, - ..Meta::default() - }; - let instruction_accounts = vec![ - AccountMeta { - pubkey: source_address, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: 
destination_address, - is_signer: false, - is_writable: true, - }, - ]; - - for (split_amount, expected_result) in [ - (2 * minimum_delegation, expected_result), - (source_lamports, Ok(())), - ] { - for (state, expected_result) in &[ - (StakeStateV2::Initialized(meta), Ok(())), - (just_stake(meta, delegation_amount), expected_result), - ] { - let source_account = AccountSharedData::new_data_with_space( - source_lamports, - &state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - - let transaction_accounts = - |initial_balance: u64| -> Vec<(Pubkey, AccountSharedData)> { - let destination_account = AccountSharedData::new_data_with_space( - initial_balance, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - vec![ - (source_address, source_account.clone()), - (destination_address, destination_account), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ] - }; - - // Test insufficient recipient prefunding; should error once feature is activated - let split_lamport_balances = vec![0, rent_exempt_reserve - 1]; - for initial_balance in split_lamport_balances { - let transaction_accounts = transaction_accounts(initial_balance); - let expected_active_stake = get_active_stake_for_tests( - &[source_account.clone(), transaction_accounts[1].1.clone()], - &clock, - &stake_history, - ); - let result_accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(split_amount)).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - expected_result.clone(), - ); - let result_active_stake = - get_active_stake_for_tests(&result_accounts[0..2], &clock, &stake_history); - if expected_active_stake > 0 // starting stake was delegated - // 
partial split - && result_accounts[0].lamports() > 0 - // successful split to deficient recipient - && expected_result.is_ok() - { - assert_ne!(expected_active_stake, result_active_stake); - } else { - assert_eq!(expected_active_stake, result_active_stake); - } - } - - // Test recipient prefunding, including exactly rent_exempt_reserve, and more than - // rent_exempt_reserve. - let split_lamport_balances = vec![rent_exempt_reserve, rent_exempt_reserve + 1]; - for initial_balance in split_lamport_balances { - let transaction_accounts = transaction_accounts(initial_balance); - let expected_active_stake = get_active_stake_for_tests( - &[source_account.clone(), transaction_accounts[1].1.clone()], - &clock, - &stake_history, - ); - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Split(split_amount)).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Ok(()), - ); - - // no lamport leakage - assert_eq!( - accounts[0].lamports() + accounts[1].lamports(), - source_lamports + initial_balance - ); - - // no deactivated stake - assert_eq!( - expected_active_stake, - get_active_stake_for_tests(&accounts[0..2], &clock, &stake_history) - ); - - if let StakeStateV2::Stake(meta, stake, stake_flags) = state { - // split entire source account, including rent-exempt reserve - if accounts[0].lamports() == 0 { - assert_eq!(Ok(StakeStateV2::Uninitialized), accounts[0].state()); - assert_eq!( - Ok(StakeStateV2::Stake( - *meta, - Stake { - delegation: Delegation { - // delegated amount should not include source - // rent-exempt reserve - stake: delegation_amount, - ..stake.delegation - }, - ..*stake - }, - *stake_flags, - )), - accounts[1].state() - ); - } else { - assert_eq!( - Ok(StakeStateV2::Stake( - *meta, - Stake { - delegation: Delegation { - stake: minimum_delegation, - ..stake.delegation - }, - ..*stake - }, - *stake_flags, - )), - accounts[0].state() - ); - assert_eq!( - Ok(StakeStateV2::Stake( - *meta, - Stake { - 
delegation: Delegation { - stake: split_amount, - ..stake.delegation - }, - ..*stake - }, - *stake_flags, - )), - accounts[1].state() - ); - } - } - } - } - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_merge(feature_set: Arc) { - let stake_address = solana_pubkey::new_rand(); - let merge_from_address = solana_pubkey::new_rand(); - let authorized_address = solana_pubkey::new_rand(); - let meta = Meta::auto(&authorized_address); - let stake_lamports = 42; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: merge_from_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_address, - is_signer: true, - is_writable: false, - }, - ]; - - for state in &[ - StakeStateV2::Initialized(meta), - just_stake(meta, stake_lamports), - ] { - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - for merge_from_state in &[ - StakeStateV2::Initialized(meta), - just_stake(meta, stake_lamports), - ] { - let merge_from_account = AccountSharedData::new_data_with_space( - stake_lamports, - merge_from_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let transaction_accounts = vec![ - (stake_address, stake_account.clone()), - (merge_from_address, merge_from_account), - (authorized_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - epoch_schedule::id(), - 
create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - - // Authorized staker signature required... - instruction_accounts[4].is_signer = false; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Merge).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - instruction_accounts[4].is_signer = true; - - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Merge).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Ok(()), - ); - - // check lamports - assert_eq!(accounts[0].lamports(), stake_lamports * 2); - assert_eq!(accounts[1].lamports(), 0); - - // check state - match state { - StakeStateV2::Initialized(meta) => { - assert_eq!(accounts[0].state(), Ok(StakeStateV2::Initialized(*meta)),); - } - StakeStateV2::Stake(meta, stake, stake_flags) => { - let expected_stake = stake.delegation.stake - + merge_from_state - .stake() - .map(|stake| stake.delegation.stake) - .unwrap_or_else(|| { - stake_lamports - - merge_from_state.meta().unwrap().rent_exempt_reserve - }); - assert_eq!( - accounts[0].state(), - Ok(StakeStateV2::Stake( - *meta, - Stake { - delegation: Delegation { - stake: expected_stake, - ..stake.delegation - }, - ..*stake - }, - *stake_flags, - )), - ); - } - _ => unreachable!(), - } - assert_eq!(accounts[1].state(), Ok(StakeStateV2::Uninitialized)); - } - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_merge_self_fails(feature_set: Arc) { - let stake_address = solana_pubkey::new_rand(); - let authorized_address = solana_pubkey::new_rand(); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_amount = 4242424242; - let stake_lamports = rent_exempt_reserve + stake_amount; - let meta = Meta { - rent_exempt_reserve, 
- ..Meta::auto(&authorized_address) - }; - let stake = Stake { - delegation: Delegation { - stake: stake_amount, - activation_epoch: 0, - ..Delegation::default() - }, - ..Stake::default() - }; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Stake(meta, stake, StakeFlags::empty()), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let transaction_accounts = vec![ - (stake_address, stake_account), - (authorized_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ]; - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_address, - is_signer: true, - is_writable: false, - }, - ]; - - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Merge).unwrap(), - transaction_accounts, - instruction_accounts, - Err(InstructionError::InvalidArgument), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_merge_incorrect_authorized_staker(feature_set: Arc) { - let stake_address = solana_pubkey::new_rand(); - let merge_from_address = solana_pubkey::new_rand(); - let authorized_address = solana_pubkey::new_rand(); - let wrong_authorized_address = solana_pubkey::new_rand(); - let stake_lamports = 42; - let mut instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: merge_from_address, - 
is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_address, - is_signer: true, - is_writable: false, - }, - ]; - - for state in &[ - StakeStateV2::Initialized(Meta::auto(&authorized_address)), - just_stake(Meta::auto(&authorized_address), stake_lamports), - ] { - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - for merge_from_state in &[ - StakeStateV2::Initialized(Meta::auto(&wrong_authorized_address)), - just_stake(Meta::auto(&wrong_authorized_address), stake_lamports), - ] { - let merge_from_account = AccountSharedData::new_data_with_space( - stake_lamports, - merge_from_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let transaction_accounts = vec![ - (stake_address, stake_account.clone()), - (merge_from_address, merge_from_account), - (authorized_address, AccountSharedData::default()), - (wrong_authorized_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - - instruction_accounts[4].pubkey = wrong_authorized_address; - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Merge).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::MissingRequiredSignature), - ); - instruction_accounts[4].pubkey = authorized_address; - - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Merge).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Err(StakeError::MergeMismatch.into()), 
- ); - } - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_merge_invalid_account_data(feature_set: Arc) { - let stake_address = solana_pubkey::new_rand(); - let merge_from_address = solana_pubkey::new_rand(); - let authorized_address = solana_pubkey::new_rand(); - let stake_lamports = 42; - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: merge_from_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_address, - is_signer: true, - is_writable: false, - }, - ]; - - for state in &[ - StakeStateV2::Uninitialized, - StakeStateV2::RewardsPool, - StakeStateV2::Initialized(Meta::auto(&authorized_address)), - just_stake(Meta::auto(&authorized_address), stake_lamports), - ] { - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - for merge_from_state in &[StakeStateV2::Uninitialized, StakeStateV2::RewardsPool] { - let merge_from_account = AccountSharedData::new_data_with_space( - stake_lamports, - merge_from_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let transaction_accounts = vec![ - (stake_address, stake_account.clone()), - (merge_from_address, merge_from_account), - (authorized_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - - process_instruction( - Arc::clone(&feature_set), - 
&serialize(&StakeInstruction::Merge).unwrap(), - transaction_accounts, - instruction_accounts.clone(), - Err(InstructionError::InvalidAccountData), - ); - } - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_merge_fake_stake_source(feature_set: Arc) { - let stake_address = solana_pubkey::new_rand(); - let merge_from_address = solana_pubkey::new_rand(); - let authorized_address = solana_pubkey::new_rand(); - let stake_lamports = 42; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &just_stake(Meta::auto(&authorized_address), stake_lamports), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let merge_from_account = AccountSharedData::new_data_with_space( - stake_lamports, - &just_stake(Meta::auto(&authorized_address), stake_lamports), - StakeStateV2::size_of(), - &solana_pubkey::new_rand(), - ) - .unwrap(); - let transaction_accounts = vec![ - (stake_address, stake_account), - (merge_from_address, merge_from_account), - (authorized_address, AccountSharedData::default()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ]; - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: merge_from_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_address, - is_signer: true, - is_writable: false, - }, - ]; - - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Merge).unwrap(), - transaction_accounts, - instruction_accounts, - Err(InstructionError::IncorrectProgramId), - ); - 
} - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_merge_active_stake(feature_set: Arc) { - let stake_address = solana_pubkey::new_rand(); - let merge_from_address = solana_pubkey::new_rand(); - let authorized_address = solana_pubkey::new_rand(); - let base_lamports = 4242424242; - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_amount = base_lamports; - let stake_lamports = rent_exempt_reserve + stake_amount; - let merge_from_amount = base_lamports; - let merge_from_lamports = rent_exempt_reserve + merge_from_amount; - let meta = Meta { - rent_exempt_reserve, - ..Meta::auto(&authorized_address) - }; - let mut stake = Stake { - delegation: Delegation { - stake: stake_amount, - activation_epoch: 0, - ..Delegation::default() - }, - ..Stake::default() - }; - let stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Stake(meta, stake, StakeFlags::empty()), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let merge_from_activation_epoch = 2; - let mut merge_from_stake = Stake { - delegation: Delegation { - stake: merge_from_amount, - activation_epoch: merge_from_activation_epoch, - ..stake.delegation - }, - ..stake - }; - let merge_from_account = AccountSharedData::new_data_with_space( - merge_from_lamports, - &StakeStateV2::Stake(meta, merge_from_stake, StakeFlags::empty()), - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - let mut clock = Clock::default(); - let mut stake_history = StakeHistory::default(); - let mut effective = base_lamports; - let mut activating = stake_amount; - let mut deactivating = 0; - stake_history.add( - clock.epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - let mut transaction_accounts = vec![ - (stake_address, stake_account), - (merge_from_address, merge_from_account), - (authorized_address, 
AccountSharedData::default()), - (clock::id(), create_account_shared_data_for_test(&clock)), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ]; - let instruction_accounts = vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: merge_from_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_address, - is_signer: true, - is_writable: false, - }, - ]; - - fn try_merge( - feature_set: Arc, - transaction_accounts: Vec<(Pubkey, AccountSharedData)>, - mut instruction_accounts: Vec, - expected_result: Result<(), InstructionError>, - ) { - for iteration in 0..2 { - if iteration == 1 { - instruction_accounts.swap(0, 1); - } - let accounts = process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Merge).unwrap(), - transaction_accounts.clone(), - instruction_accounts.clone(), - expected_result.clone(), - ); - if expected_result.is_ok() { - assert_eq!( - accounts[1 - iteration].state(), - Ok(StakeStateV2::Uninitialized) - ); - } - } - } - - // stake activation epoch, source initialized succeeds - try_merge( - Arc::clone(&feature_set), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - - let new_warmup_cooldown_rate_epoch = - feature_set.new_warmup_cooldown_rate_epoch(&EpochSchedule::default()); - - // both activating fails - loop { - clock.epoch += 1; - if clock.epoch == merge_from_activation_epoch { - activating += merge_from_amount; - } - let delta = activating.min( - (effective as f64 - * warmup_cooldown_rate(clock.epoch, new_warmup_cooldown_rate_epoch)) - as u64, - ); - effective += delta; - activating 
-= delta; - stake_history.add( - clock.epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - transaction_accounts[3] = (clock::id(), create_account_shared_data_for_test(&clock)); - transaction_accounts[4] = ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ); - if stake_amount - == stake.stake(clock.epoch, &stake_history, new_warmup_cooldown_rate_epoch) - && merge_from_amount - == merge_from_stake.stake( - clock.epoch, - &stake_history, - new_warmup_cooldown_rate_epoch, - ) - { - break; - } - try_merge( - Arc::clone(&feature_set), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::from(StakeError::MergeTransientStake)), - ); - } - - // Both fully activated works - try_merge( - Arc::clone(&feature_set), - transaction_accounts.clone(), - instruction_accounts.clone(), - Ok(()), - ); - - // deactivate setup for deactivation - let merge_from_deactivation_epoch = clock.epoch + 1; - let stake_deactivation_epoch = clock.epoch + 2; - - // active/deactivating and deactivating/inactive mismatches fail - loop { - clock.epoch += 1; - let delta = deactivating.min( - (effective as f64 - * warmup_cooldown_rate(clock.epoch, new_warmup_cooldown_rate_epoch)) - as u64, - ); - effective -= delta; - deactivating -= delta; - if clock.epoch == stake_deactivation_epoch { - deactivating += stake_amount; - stake = Stake { - delegation: Delegation { - deactivation_epoch: stake_deactivation_epoch, - ..stake.delegation - }, - ..stake - }; - transaction_accounts[0] - .1 - .set_state(&StakeStateV2::Stake(meta, stake, StakeFlags::empty())) - .unwrap(); - } - if clock.epoch == merge_from_deactivation_epoch { - deactivating += merge_from_amount; - merge_from_stake = Stake { - delegation: Delegation { - deactivation_epoch: merge_from_deactivation_epoch, - ..merge_from_stake.delegation - }, - ..merge_from_stake - }; - transaction_accounts[1] - .1 - .set_state(&StakeStateV2::Stake( - meta, - 
merge_from_stake, - StakeFlags::empty(), - )) - .unwrap(); - } - stake_history.add( - clock.epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - transaction_accounts[3] = (clock::id(), create_account_shared_data_for_test(&clock)); - transaction_accounts[4] = ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ); - if 0 == stake.stake(clock.epoch, &stake_history, new_warmup_cooldown_rate_epoch) - && 0 == merge_from_stake.stake( - clock.epoch, - &stake_history, - new_warmup_cooldown_rate_epoch, - ) - { - break; - } - try_merge( - Arc::clone(&feature_set), - transaction_accounts.clone(), - instruction_accounts.clone(), - Err(InstructionError::from(StakeError::MergeTransientStake)), - ); - } - - // Both fully deactivated works - try_merge( - Arc::clone(&feature_set), - transaction_accounts, - instruction_accounts, - Ok(()), - ); - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_stake_get_minimum_delegation(feature_set: Arc) { - let stake_address = Pubkey::new_unique(); - let stake_account = create_default_stake_account(); - let instruction_data = serialize(&StakeInstruction::GetMinimumDelegation).unwrap(); - let transaction_accounts = vec![(stake_address, stake_account)]; - let instruction_accounts = vec![AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }]; - - mock_process_instruction_with_feature_set( - &id(), - Vec::new(), - &instruction_data, - transaction_accounts, - instruction_accounts, - Ok(()), - Entrypoint::vm, - |_invoke_context| {}, - |invoke_context| { - let expected_minimum_delegation = crate::get_minimum_delegation( - invoke_context.is_stake_raise_minimum_delegation_to_1_sol_active(), - ) - .to_le_bytes(); - let actual_minimum_delegation = - invoke_context.transaction_context.get_return_data().1; - assert_eq!(expected_minimum_delegation, actual_minimum_delegation); - 
}, - &feature_set.runtime_features(), - ); - } - - // Ensure that the correct errors are returned when processing instructions - // - // The GetMinimumDelegation instruction does not take any accounts; so when it was added, - // `process_instruction()` needed to be updated to *not* need a stake account passed in, which - // changes the error *ordering* conditions. - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_stake_process_instruction_error_ordering(feature_set: Arc) { - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let rent_address = rent::id(); - let rent_account = create_account_shared_data_for_test(&rent); - - let good_stake_address = Pubkey::new_unique(); - let good_stake_account = - AccountSharedData::new(rent_exempt_reserve, StakeStateV2::size_of(), &id()); - let good_instruction = instruction::initialize( - &good_stake_address, - &Authorized::auto(&good_stake_address), - &Lockup::default(), - ); - let good_transaction_accounts = vec![ - (good_stake_address, good_stake_account), - (rent_address, rent_account), - ]; - let good_instruction_accounts = vec![ - AccountMeta { - pubkey: good_stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: rent_address, - is_signer: false, - is_writable: false, - }, - ]; - let good_accounts = (good_transaction_accounts, good_instruction_accounts); - - // The instruction data needs to deserialize to a bogus StakeInstruction. We likely never - // will have `usize::MAX`-number of instructions, so this should be a safe constant to - // always map to an invalid stake instruction. 
- let bad_instruction = Instruction::new_with_bincode(id(), &usize::MAX, Vec::default()); - let bad_transaction_accounts = Vec::default(); - let bad_instruction_accounts = Vec::default(); - let bad_accounts = (bad_transaction_accounts, bad_instruction_accounts); - - for (instruction, (transaction_accounts, instruction_accounts), expected_result) in [ - (&good_instruction, &good_accounts, Ok(())), - ( - &bad_instruction, - &good_accounts, - Err(InstructionError::InvalidInstructionData), - ), - ( - &good_instruction, - &bad_accounts, - Err(InstructionError::NotEnoughAccountKeys), - ), - ( - &bad_instruction, - &bad_accounts, - Err(InstructionError::InvalidInstructionData), - ), - ] { - process_instruction( - feature_set.clone(), - &instruction.data, - transaction_accounts.clone(), - instruction_accounts.clone(), - expected_result, - ); - } - } - - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_deactivate_delinquent(feature_set: Arc) { - let reference_vote_address = Pubkey::new_unique(); - let vote_address = Pubkey::new_unique(); - let stake_address = Pubkey::new_unique(); - - let initial_stake_state = StakeStateV2::Stake( - Meta::default(), - new_stake( - 1, /* stake */ - &vote_address, - &VoteState::default(), - 1, /* activation_epoch */ - ), - StakeFlags::empty(), - ); - - let stake_account = AccountSharedData::new_data_with_space( - 1, /* lamports */ - &initial_stake_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap(); - - let mut vote_account = AccountSharedData::new_data_with_space( - 1, /* lamports */ - &VoteStateVersions::new_current(VoteState::default()), - VoteState::size_of(), - &solana_sdk_ids::vote::id(), - ) - .unwrap(); - - let mut reference_vote_account = AccountSharedData::new_data_with_space( - 1, /* lamports */ - &VoteStateVersions::new_current(VoteState::default()), - VoteState::size_of(), - &solana_sdk_ids::vote::id(), - ) - .unwrap(); - - let 
current_epoch = 20; - - let process_instruction_deactivate_delinquent = - |stake_address: &Pubkey, - stake_account: &AccountSharedData, - vote_account: &AccountSharedData, - reference_vote_account: &AccountSharedData, - expected_result| { - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DeactivateDelinquent).unwrap(), - vec![ - (*stake_address, stake_account.clone()), - (vote_address, vote_account.clone()), - (reference_vote_address, reference_vote_account.clone()), - ( - clock::id(), - create_account_shared_data_for_test(&Clock { - epoch: current_epoch, - ..Clock::default() - }), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&StakeHistory::default()), - ), - ], - vec![ - AccountMeta { - pubkey: *stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: reference_vote_address, - is_signer: false, - is_writable: false, - }, - ], - expected_result, - ) - }; - - // `reference_vote_account` has not voted. Instruction will fail - process_instruction_deactivate_delinquent( - &stake_address, - &stake_account, - &vote_account, - &reference_vote_account, - Err(StakeError::InsufficientReferenceVotes.into()), - ); - - // `reference_vote_account` has not consistently voted for at least - // `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION`. 
- // Instruction will fail - let mut reference_vote_state = VoteState::default(); - for epoch in 0..MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION / 2 { - reference_vote_state.increment_credits(epoch as Epoch, 1); - } - reference_vote_account - .serialize_data(&VoteStateVersions::new_current(reference_vote_state)) - .unwrap(); - - process_instruction_deactivate_delinquent( - &stake_address, - &stake_account, - &vote_account, - &reference_vote_account, - Err(StakeError::InsufficientReferenceVotes.into()), - ); - - // `reference_vote_account` has not consistently voted for the last - // `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION`. - // Instruction will fail - let mut reference_vote_state = VoteState::default(); - for epoch in 0..=current_epoch { - reference_vote_state.increment_credits(epoch, 1); - } - assert_eq!( - reference_vote_state.epoch_credits[current_epoch as usize - 2].0, - current_epoch - 2 - ); - reference_vote_state - .epoch_credits - .remove(current_epoch as usize - 2); - assert_eq!( - reference_vote_state.epoch_credits[current_epoch as usize - 2].0, - current_epoch - 1 - ); - reference_vote_account - .serialize_data(&VoteStateVersions::new_current(reference_vote_state)) - .unwrap(); - - process_instruction_deactivate_delinquent( - &stake_address, - &stake_account, - &vote_account, - &reference_vote_account, - Err(StakeError::InsufficientReferenceVotes.into()), - ); - - // `reference_vote_account` has consistently voted and `vote_account` has never voted. 
- // Instruction will succeed - let mut reference_vote_state = VoteState::default(); - for epoch in 0..=current_epoch { - reference_vote_state.increment_credits(epoch, 1); - } - reference_vote_account - .serialize_data(&VoteStateVersions::new_current(reference_vote_state)) - .unwrap(); - - let post_stake_account = &process_instruction_deactivate_delinquent( - &stake_address, - &stake_account, - &vote_account, - &reference_vote_account, - Ok(()), - )[0]; - - assert_eq!( - stake_from(post_stake_account) - .unwrap() - .delegation - .deactivation_epoch, - current_epoch - ); - - // `reference_vote_account` has consistently voted and `vote_account` has not voted for the - // last `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION`. - // Instruction will succeed - - let mut vote_state = VoteState::default(); - for epoch in 0..MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION / 2 { - vote_state.increment_credits(epoch as Epoch, 1); - } - vote_account - .serialize_data(&VoteStateVersions::new_current(vote_state)) - .unwrap(); - - let post_stake_account = &process_instruction_deactivate_delinquent( - &stake_address, - &stake_account, - &vote_account, - &reference_vote_account, - Ok(()), - )[0]; - - assert_eq!( - stake_from(post_stake_account) - .unwrap() - .delegation - .deactivation_epoch, - current_epoch - ); - - // `reference_vote_account` has consistently voted and `vote_account` has not voted for the - // last `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION`. Try to deactivate an unrelated stake - // account. 
Instruction will fail - let unrelated_vote_address = Pubkey::new_unique(); - let unrelated_stake_address = Pubkey::new_unique(); - let mut unrelated_stake_account = stake_account.clone(); - assert_ne!(unrelated_vote_address, vote_address); - unrelated_stake_account - .serialize_data(&StakeStateV2::Stake( - Meta::default(), - new_stake( - 1, /* stake */ - &unrelated_vote_address, - &VoteState::default(), - 1, /* activation_epoch */ - ), - StakeFlags::empty(), - )) - .unwrap(); - - process_instruction_deactivate_delinquent( - &unrelated_stake_address, - &unrelated_stake_account, - &vote_account, - &reference_vote_account, - Err(StakeError::VoteAddressMismatch.into()), - ); - - // `reference_vote_account` has consistently voted and `vote_account` voted once - // `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION` ago. - // Instruction will succeed - let mut vote_state = VoteState::default(); - vote_state.increment_credits( - current_epoch - MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION as Epoch, - 1, - ); - vote_account - .serialize_data(&VoteStateVersions::new_current(vote_state)) - .unwrap(); - process_instruction_deactivate_delinquent( - &stake_address, - &stake_account, - &vote_account, - &reference_vote_account, - Ok(()), - ); - - // `reference_vote_account` has consistently voted and `vote_account` voted once - // `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION` - 1 epochs ago - // Instruction will fail - let mut vote_state = VoteState::default(); - vote_state.increment_credits( - current_epoch - (MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION - 1) as Epoch, - 1, - ); - vote_account - .serialize_data(&VoteStateVersions::new_current(vote_state)) - .unwrap(); - process_instruction_deactivate_delinquent( - &stake_address, - &stake_account, - &vote_account, - &reference_vote_account, - Err(StakeError::MinimumDelinquentEpochsForDeactivationNotMet.into()), - ); - } - - #[test] - fn test_stake_process_instruction_with_epoch_rewards_active() { - let feature_set = 
feature_set_all_enabled(); - - let process_instruction_as_one_arg = |feature_set: Arc, - instruction: &Instruction, - expected_result: Result<(), InstructionError>| - -> Vec { - let mut transaction_accounts = get_default_transaction_accounts(instruction); - - // Initialize EpochRewards sysvar account - let epoch_rewards_sysvar = EpochRewards { - active: true, - ..EpochRewards::default() - }; - transaction_accounts.push(( - epoch_rewards::id(), - create_account_shared_data_for_test(&epoch_rewards_sysvar), - )); - - process_instruction( - Arc::clone(&feature_set), - &instruction.data, - transaction_accounts, - instruction.accounts.clone(), - expected_result, - ) - }; - - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::initialize( - &Pubkey::new_unique(), - &Authorized::default(), - &Lockup::default(), - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::authorize( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - StakeAuthorize::Staker, - None, - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::delegate_stake( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &invalid_vote_state_pubkey(), - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::split( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - &invalid_stake_state_pubkey(), - )[2], - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::withdraw( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - None, - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::deactivate_stake(&Pubkey::new_unique(), &Pubkey::new_unique()), - 
Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::set_lockup( - &Pubkey::new_unique(), - &LockupArgs::default(), - &Pubkey::new_unique(), - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::merge( - &Pubkey::new_unique(), - &invalid_stake_state_pubkey(), - &Pubkey::new_unique(), - )[0], - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::authorize_with_seed( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - "seed".to_string(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - StakeAuthorize::Staker, - None, - ), - Err(StakeError::EpochRewardsActive.into()), - ); - - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::initialize_checked(&Pubkey::new_unique(), &Authorized::default()), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::authorize_checked( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - StakeAuthorize::Staker, - None, - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::authorize_checked_with_seed( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - "seed".to_string(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - StakeAuthorize::Staker, - None, - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::set_lockup_checked( - &Pubkey::new_unique(), - &LockupArgs::default(), - &Pubkey::new_unique(), - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::deactivate_delinquent_stake( - &Pubkey::new_unique(), - &invalid_vote_state_pubkey(), - 
&Pubkey::new_unique(), - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::move_stake( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - ), - Err(StakeError::EpochRewardsActive.into()), - ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::move_lamports( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - 100, - ), - Err(StakeError::EpochRewardsActive.into()), - ); - - // Only GetMinimumDelegation should not return StakeError::EpochRewardsActive - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::get_minimum_delegation(), - Ok(()), - ); - } -} diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 5e5870d47e7683..447afecf788fac 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -1,7 +1,4 @@ -//! Stake state -//! * delegate stakes to vote accounts -//! * keep track of rewards -//! * own mining pools +//! 
Stake state: helper functions for creating and parsing account data #[deprecated( since = "1.8.0", @@ -10,35 +7,14 @@ pub use solana_stake_interface::state::*; use { solana_account::{state_traits::StateMut, AccountSharedData, ReadableAccount}, - solana_clock::{Clock, Epoch}, - solana_instruction::error::InstructionError, - solana_log_collector::ic_msg, - solana_program_runtime::invoke_context::InvokeContext, + solana_clock::Epoch, solana_pubkey::Pubkey, solana_rent::Rent, solana_sdk_ids::stake::id, - solana_stake_interface::{ - error::StakeError, - instruction::LockupArgs, - stake_flags::StakeFlags, - tools::{acceptable_reference_epoch_credits, eligible_for_deactivate_delinquent}, - }, - solana_sysvar::stake_history::{StakeHistory, StakeHistoryEntry}, - solana_transaction_context::{ - BorrowedAccount, IndexOfAccount, InstructionContext, TransactionContext, - }, - solana_vote_interface::state::{VoteState, VoteStateVersions}, - std::{collections::HashSet, convert::TryFrom}, + solana_stake_interface::stake_flags::StakeFlags, + solana_vote_interface::state::VoteStateV3, }; -// feature_set::reduce_stake_warmup_cooldown changed the warmup/cooldown from -// 25% to 9%. this number is necessary to calculate historical effective stake from -// stake history, but we only care that stake we are dealing with in the present -// epoch has been fully (de)activated. this means, as long as one epoch has -// passed since activation where all prior stake had escaped warmup/cooldown, -// we can pretend the rate has always beein 9% without issue. 
so we do that -const PERPETUAL_NEW_WARMUP_COOLDOWN_RATE_EPOCH: Option = Some(0); - // utility function, used by Stakes, tests pub fn from>(account: &T) -> Option { account.state().ok() @@ -64,147 +40,10 @@ pub fn meta_from(account: &AccountSharedData) -> Option { from(account).and_then(|state: StakeStateV2| state.meta()) } -pub(crate) fn new_warmup_cooldown_rate_epoch() -> Option { - PERPETUAL_NEW_WARMUP_COOLDOWN_RATE_EPOCH -} - -fn checked_add(a: u64, b: u64) -> Result { - a.checked_add(b).ok_or(InstructionError::InsufficientFunds) -} - -fn get_stake_status( - invoke_context: &InvokeContext, - stake: &Stake, - clock: &Clock, -) -> Result { - let stake_history = invoke_context.get_sysvar_cache().get_stake_history()?; - Ok(stake.delegation.stake_activating_and_deactivating( - clock.epoch, - stake_history.as_ref(), - new_warmup_cooldown_rate_epoch(), - )) -} - -fn redelegate_stake( - stake: &mut Stake, - stake_lamports: u64, - voter_pubkey: &Pubkey, - vote_state: &VoteState, - clock: &Clock, - stake_history: &StakeHistory, -) -> Result<(), StakeError> { - let new_rate_activation_epoch = new_warmup_cooldown_rate_epoch(); - // If stake is currently active: - if stake.stake(clock.epoch, stake_history, new_rate_activation_epoch) != 0 { - // If pubkey of new voter is the same as current, - // and we are scheduled to start deactivating this epoch, - // we rescind deactivation - if stake.delegation.voter_pubkey == *voter_pubkey - && clock.epoch == stake.delegation.deactivation_epoch - { - stake.delegation.deactivation_epoch = u64::MAX; - return Ok(()); - } else { - // can't redelegate to another pubkey if stake is active. - return Err(StakeError::TooSoonToRedelegate); - } - } - // Either the stake is freshly activated, is active but has been - // deactivated this epoch, or has fully de-activated. 
- // Redelegation implies either re-activation or un-deactivation - - stake.delegation.stake = stake_lamports; - stake.delegation.activation_epoch = clock.epoch; - stake.delegation.deactivation_epoch = u64::MAX; - stake.delegation.voter_pubkey = *voter_pubkey; - stake.credits_observed = vote_state.credits(); - Ok(()) -} - -fn move_stake_or_lamports_shared_checks( - invoke_context: &InvokeContext, - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - source_account: &BorrowedAccount, - lamports: u64, - destination_account: &BorrowedAccount, - stake_authority_index: IndexOfAccount, -) -> Result<(MergeKind, MergeKind), InstructionError> { - // authority must sign - let stake_authority_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(stake_authority_index)?, - )?; - if !instruction_context.is_instruction_account_signer(stake_authority_index)? { - return Err(InstructionError::MissingRequiredSignature); - } - - let mut signers = HashSet::new(); - signers.insert(*stake_authority_pubkey); - - // check owners - if *source_account.get_owner() != id() || *destination_account.get_owner() != id() { - return Err(InstructionError::IncorrectProgramId); - } - - // confirm not the same account - if *source_account.get_key() == *destination_account.get_key() { - return Err(InstructionError::InvalidInstructionData); - } - - // source and destination must be writable - if !source_account.is_writable() || !destination_account.is_writable() { - return Err(InstructionError::InvalidInstructionData); - } - - // must move something - if lamports == 0 { - return Err(InstructionError::InvalidArgument); - } - - let clock = invoke_context.get_sysvar_cache().get_clock()?; - let stake_history = invoke_context.get_sysvar_cache().get_stake_history()?; - - // get_if_mergeable ensures accounts are not partly activated or in any form of deactivating - // we still need to exclude 
activating state ourselves - let source_merge_kind = MergeKind::get_if_mergeable( - invoke_context, - &source_account.get_state()?, - source_account.get_lamports(), - &clock, - &stake_history, - )?; - - // Authorized staker is allowed to move stake - source_merge_kind - .meta() - .authorized - .check(&signers, StakeAuthorize::Staker)?; - - // same transient assurance as with source - let destination_merge_kind = MergeKind::get_if_mergeable( - invoke_context, - &destination_account.get_state()?, - destination_account.get_lamports(), - &clock, - &stake_history, - )?; - - // ensure all authorities match and lockups match if lockup is in force - MergeKind::metas_can_merge( - invoke_context, - source_merge_kind.meta(), - destination_merge_kind.meta(), - &clock, - )?; - - Ok((source_merge_kind, destination_merge_kind)) -} - -pub(crate) fn new_stake( +fn new_stake( stake: u64, voter_pubkey: &Pubkey, - vote_state: &VoteState, + vote_state: &VoteStateV3, activation_epoch: Epoch, ) -> Stake { Stake { @@ -213,1140 +52,6 @@ pub(crate) fn new_stake( } } -pub fn initialize( - stake_account: &mut BorrowedAccount, - authorized: &Authorized, - lockup: &Lockup, - rent: &Rent, -) -> Result<(), InstructionError> { - if stake_account.get_data().len() != StakeStateV2::size_of() { - return Err(InstructionError::InvalidAccountData); - } - - if let StakeStateV2::Uninitialized = stake_account.get_state()? { - let rent_exempt_reserve = rent.minimum_balance(stake_account.get_data().len()); - if stake_account.get_lamports() >= rent_exempt_reserve { - stake_account.set_state(&StakeStateV2::Initialized(Meta { - rent_exempt_reserve, - authorized: *authorized, - lockup: *lockup, - })) - } else { - Err(InstructionError::InsufficientFunds) - } - } else { - Err(InstructionError::InvalidAccountData) - } -} - -/// Authorize the given pubkey to manage stake (deactivate, withdraw). 
This may be called -/// multiple times, but will implicitly withdraw authorization from the previously authorized -/// staker. The default staker is the owner of the stake account's pubkey. -pub fn authorize( - stake_account: &mut BorrowedAccount, - signers: &HashSet, - new_authority: &Pubkey, - stake_authorize: StakeAuthorize, - clock: &Clock, - custodian: Option<&Pubkey>, -) -> Result<(), InstructionError> { - match stake_account.get_state()? { - StakeStateV2::Stake(mut meta, stake, stake_flags) => { - meta.authorized.authorize( - signers, - new_authority, - stake_authorize, - Some((&meta.lockup, clock, custodian)), - )?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) - } - StakeStateV2::Initialized(mut meta) => { - meta.authorized.authorize( - signers, - new_authority, - stake_authorize, - Some((&meta.lockup, clock, custodian)), - )?; - stake_account.set_state(&StakeStateV2::Initialized(meta)) - } - _ => Err(InstructionError::InvalidAccountData), - } -} - -#[allow(clippy::too_many_arguments)] -pub fn authorize_with_seed( - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - stake_account: &mut BorrowedAccount, - authority_base_index: IndexOfAccount, - authority_seed: &str, - authority_owner: &Pubkey, - new_authority: &Pubkey, - stake_authorize: StakeAuthorize, - clock: &Clock, - custodian: Option<&Pubkey>, -) -> Result<(), InstructionError> { - let mut signers = HashSet::default(); - if instruction_context.is_instruction_account_signer(authority_base_index)? 
{ - let base_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(authority_base_index)?, - )?; - signers.insert(Pubkey::create_with_seed( - base_pubkey, - authority_seed, - authority_owner, - )?); - } - authorize( - stake_account, - &signers, - new_authority, - stake_authorize, - clock, - custodian, - ) -} - -#[allow(clippy::too_many_arguments)] -pub fn delegate( - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - stake_account_index: IndexOfAccount, - vote_account_index: IndexOfAccount, - clock: &Clock, - stake_history: &StakeHistory, - signers: &HashSet, - invoke_context: &InvokeContext, -) -> Result<(), InstructionError> { - let vote_account = instruction_context - .try_borrow_instruction_account(transaction_context, vote_account_index)?; - if *vote_account.get_owner() != solana_sdk_ids::vote::id() { - return Err(InstructionError::IncorrectProgramId); - } - let vote_pubkey = *vote_account.get_key(); - let vote_state = vote_account.get_state::(); - drop(vote_account); - - let mut stake_account = instruction_context - .try_borrow_instruction_account(transaction_context, stake_account_index)?; - match stake_account.get_state()? 
{ - StakeStateV2::Initialized(meta) => { - meta.authorized.check(signers, StakeAuthorize::Staker)?; - let ValidatedDelegatedInfo { stake_amount } = - validate_delegated_amount(&stake_account, &meta, invoke_context)?; - let stake = new_stake( - stake_amount, - &vote_pubkey, - &vote_state?.convert_to_current(), - clock.epoch, - ); - stake_account.set_state(&StakeStateV2::Stake(meta, stake, StakeFlags::empty())) - } - StakeStateV2::Stake(meta, mut stake, stake_flags) => { - meta.authorized.check(signers, StakeAuthorize::Staker)?; - let ValidatedDelegatedInfo { stake_amount } = - validate_delegated_amount(&stake_account, &meta, invoke_context)?; - redelegate_stake( - &mut stake, - stake_amount, - &vote_pubkey, - &vote_state?.convert_to_current(), - clock, - stake_history, - )?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) - } - _ => Err(InstructionError::InvalidAccountData), - } -} - -pub fn deactivate( - stake_account: &mut BorrowedAccount, - clock: &Clock, - signers: &HashSet, -) -> Result<(), InstructionError> { - if let StakeStateV2::Stake(meta, mut stake, stake_flags) = stake_account.get_state()? { - meta.authorized.check(signers, StakeAuthorize::Staker)?; - stake.deactivate(clock.epoch)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) - } else { - Err(InstructionError::InvalidAccountData) - } -} - -pub fn set_lockup( - stake_account: &mut BorrowedAccount, - lockup: &LockupArgs, - signers: &HashSet, - clock: &Clock, -) -> Result<(), InstructionError> { - match stake_account.get_state()? 
{ - StakeStateV2::Initialized(mut meta) => { - meta.set_lockup(lockup, signers, clock)?; - stake_account.set_state(&StakeStateV2::Initialized(meta)) - } - StakeStateV2::Stake(mut meta, stake, stake_flags) => { - meta.set_lockup(lockup, signers, clock)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) - } - _ => Err(InstructionError::InvalidAccountData), - } -} - -pub fn split( - invoke_context: &InvokeContext, - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - stake_account_index: IndexOfAccount, - lamports: u64, - split_index: IndexOfAccount, - signers: &HashSet, -) -> Result<(), InstructionError> { - let split = - instruction_context.try_borrow_instruction_account(transaction_context, split_index)?; - if *split.get_owner() != id() { - return Err(InstructionError::IncorrectProgramId); - } - if split.get_data().len() != StakeStateV2::size_of() { - return Err(InstructionError::InvalidAccountData); - } - if !matches!(split.get_state()?, StakeStateV2::Uninitialized) { - return Err(InstructionError::InvalidAccountData); - } - let split_lamport_balance = split.get_lamports(); - drop(split); - let stake_account = instruction_context - .try_borrow_instruction_account(transaction_context, stake_account_index)?; - if lamports > stake_account.get_lamports() { - return Err(InstructionError::InsufficientFunds); - } - let stake_state = stake_account.get_state()?; - drop(stake_account); - - match stake_state { - StakeStateV2::Stake(meta, mut stake, stake_flags) => { - meta.authorized.check(signers, StakeAuthorize::Staker)?; - let minimum_delegation = crate::get_minimum_delegation( - invoke_context.is_stake_raise_minimum_delegation_to_1_sol_active(), - ); - let is_active = { - let clock = invoke_context.get_sysvar_cache().get_clock()?; - let status = get_stake_status(invoke_context, &stake, &clock)?; - status.effective > 0 - }; - let validated_split_info = validate_split_amount( - invoke_context, - 
transaction_context, - instruction_context, - stake_account_index, - split_index, - lamports, - &meta, - minimum_delegation, - is_active, - )?; - - // split the stake, subtract rent_exempt_balance unless - // the destination account already has those lamports - // in place. - // this means that the new stake account will have a stake equivalent to - // lamports minus rent_exempt_reserve if it starts out with a zero balance - let (remaining_stake_delta, split_stake_amount) = - if validated_split_info.source_remaining_balance == 0 { - // If split amount equals the full source stake (as implied by 0 - // source_remaining_balance), the new split stake must equal the same - // amount, regardless of any current lamport balance in the split account. - // Since split accounts retain the state of their source account, this - // prevents any magic activation of stake by prefunding the split account. - // - // The new split stake also needs to ignore any positive delta between the - // original rent_exempt_reserve and the split_rent_exempt_reserve, in order - // to prevent magic activation of stake by splitting between accounts of - // different sizes. - let remaining_stake_delta = lamports.saturating_sub(meta.rent_exempt_reserve); - (remaining_stake_delta, remaining_stake_delta) - } else { - // Otherwise, the new split stake should reflect the entire split - // requested, less any lamports needed to cover the split_rent_exempt_reserve. 
- - if stake.delegation.stake.saturating_sub(lamports) < minimum_delegation { - return Err(StakeError::InsufficientDelegation.into()); - } - - ( - lamports, - lamports.saturating_sub( - validated_split_info - .destination_rent_exempt_reserve - .saturating_sub(split_lamport_balance), - ), - ) - }; - - if split_stake_amount < minimum_delegation { - return Err(StakeError::InsufficientDelegation.into()); - } - - let split_stake = stake.split(remaining_stake_delta, split_stake_amount)?; - let mut split_meta = meta; - split_meta.rent_exempt_reserve = validated_split_info.destination_rent_exempt_reserve; - - let mut stake_account = instruction_context - .try_borrow_instruction_account(transaction_context, stake_account_index)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags))?; - drop(stake_account); - let mut split = instruction_context - .try_borrow_instruction_account(transaction_context, split_index)?; - split.set_state(&StakeStateV2::Stake(split_meta, split_stake, stake_flags))?; - } - StakeStateV2::Initialized(meta) => { - meta.authorized.check(signers, StakeAuthorize::Staker)?; - let validated_split_info = validate_split_amount( - invoke_context, - transaction_context, - instruction_context, - stake_account_index, - split_index, - lamports, - &meta, - 0, // additional_required_lamports - false, - )?; - let mut split_meta = meta; - split_meta.rent_exempt_reserve = validated_split_info.destination_rent_exempt_reserve; - let mut split = instruction_context - .try_borrow_instruction_account(transaction_context, split_index)?; - split.set_state(&StakeStateV2::Initialized(split_meta))?; - } - StakeStateV2::Uninitialized => { - let stake_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(stake_account_index)?, - )?; - if !signers.contains(stake_pubkey) { - return Err(InstructionError::MissingRequiredSignature); - } - } - _ => return 
Err(InstructionError::InvalidAccountData), - } - - // Deinitialize state upon zero balance - let mut stake_account = instruction_context - .try_borrow_instruction_account(transaction_context, stake_account_index)?; - if lamports == stake_account.get_lamports() { - stake_account.set_state(&StakeStateV2::Uninitialized)?; - } - drop(stake_account); - - let mut split = - instruction_context.try_borrow_instruction_account(transaction_context, split_index)?; - split.checked_add_lamports(lamports)?; - drop(split); - let mut stake_account = instruction_context - .try_borrow_instruction_account(transaction_context, stake_account_index)?; - stake_account.checked_sub_lamports(lamports)?; - Ok(()) -} - -pub fn merge( - invoke_context: &InvokeContext, - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - stake_account_index: IndexOfAccount, - source_account_index: IndexOfAccount, - clock: &Clock, - stake_history: &StakeHistory, - signers: &HashSet, -) -> Result<(), InstructionError> { - let mut source_account = instruction_context - .try_borrow_instruction_account(transaction_context, source_account_index)?; - // Ensure source isn't spoofed - if *source_account.get_owner() != id() { - return Err(InstructionError::IncorrectProgramId); - } - // Close the stake_account-reference loophole - if instruction_context.get_index_of_instruction_account_in_transaction(stake_account_index)? - == instruction_context - .get_index_of_instruction_account_in_transaction(source_account_index)? 
- { - return Err(InstructionError::InvalidArgument); - } - let mut stake_account = instruction_context - .try_borrow_instruction_account(transaction_context, stake_account_index)?; - - ic_msg!(invoke_context, "Checking if destination stake is mergeable"); - let stake_merge_kind = MergeKind::get_if_mergeable( - invoke_context, - &stake_account.get_state()?, - stake_account.get_lamports(), - clock, - stake_history, - )?; - - // Authorized staker is allowed to split/merge accounts - stake_merge_kind - .meta() - .authorized - .check(signers, StakeAuthorize::Staker)?; - - ic_msg!(invoke_context, "Checking if source stake is mergeable"); - let source_merge_kind = MergeKind::get_if_mergeable( - invoke_context, - &source_account.get_state()?, - source_account.get_lamports(), - clock, - stake_history, - )?; - - ic_msg!(invoke_context, "Merging stake accounts"); - if let Some(merged_state) = stake_merge_kind.merge(invoke_context, source_merge_kind, clock)? { - stake_account.set_state(&merged_state)?; - } - - // Source is about to be drained, deinitialize its state - source_account.set_state(&StakeStateV2::Uninitialized)?; - - // Drain the source stake account - let lamports = source_account.get_lamports(); - source_account.checked_sub_lamports(lamports)?; - stake_account.checked_add_lamports(lamports)?; - Ok(()) -} - -pub fn move_stake( - invoke_context: &InvokeContext, - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - source_account_index: IndexOfAccount, - lamports: u64, - destination_account_index: IndexOfAccount, - stake_authority_index: IndexOfAccount, -) -> Result<(), InstructionError> { - let mut source_account = instruction_context - .try_borrow_instruction_account(transaction_context, source_account_index)?; - - let mut destination_account = instruction_context - .try_borrow_instruction_account(transaction_context, destination_account_index)?; - - let (source_merge_kind, destination_merge_kind) = 
move_stake_or_lamports_shared_checks( - invoke_context, - transaction_context, - instruction_context, - &source_account, - lamports, - &destination_account, - stake_authority_index, - )?; - - // ensure source and destination are the right size for the current version of StakeState - // this a safeguard in case there is a new version of the struct that cannot fit into an old account - if source_account.get_data().len() != StakeStateV2::size_of() - || destination_account.get_data().len() != StakeStateV2::size_of() - { - return Err(InstructionError::InvalidAccountData); - } - - // source must be fully active - let MergeKind::FullyActive(source_meta, mut source_stake) = source_merge_kind else { - return Err(InstructionError::InvalidAccountData); - }; - - let minimum_delegation = crate::get_minimum_delegation( - invoke_context.is_stake_raise_minimum_delegation_to_1_sol_active(), - ); - let source_effective_stake = source_stake.delegation.stake; - - // source cannot move more stake than it has, regardless of how many lamports it has - let source_final_stake = source_effective_stake - .checked_sub(lamports) - .ok_or(InstructionError::InvalidArgument)?; - - // unless all stake is being moved, source must retain at least the minimum delegation - if source_final_stake != 0 && source_final_stake < minimum_delegation { - return Err(InstructionError::InvalidArgument); - } - - // destination must be fully active or fully inactive - let destination_meta = match destination_merge_kind { - MergeKind::FullyActive(destination_meta, mut destination_stake) => { - // if active, destination must be delegated to the same vote account as source - if source_stake.delegation.voter_pubkey != destination_stake.delegation.voter_pubkey { - return Err(StakeError::VoteAddressMismatch.into()); - } - - let destination_effective_stake = destination_stake.delegation.stake; - let destination_final_stake = destination_effective_stake - .checked_add(lamports) - 
.ok_or(InstructionError::ArithmeticOverflow)?; - - // ensure destination meets miniumum delegation - // since it is already active, this only really applies if the minimum is raised - if destination_final_stake < minimum_delegation { - return Err(InstructionError::InvalidArgument); - } - - merge_delegation_stake_and_credits_observed( - &mut destination_stake, - lamports, - source_stake.credits_observed, - )?; - - destination_account.set_state(&StakeStateV2::Stake( - destination_meta, - destination_stake, - StakeFlags::empty(), - ))?; - - destination_meta - } - MergeKind::Inactive(destination_meta, _, _) => { - // if destination is inactive, it must be given at least the minimum delegation - if lamports < minimum_delegation { - return Err(InstructionError::InvalidArgument); - } - - let mut destination_stake = source_stake; - destination_stake.delegation.stake = lamports; - - destination_account.set_state(&StakeStateV2::Stake( - destination_meta, - destination_stake, - StakeFlags::empty(), - ))?; - - destination_meta - } - _ => return Err(InstructionError::InvalidAccountData), - }; - - if source_final_stake == 0 { - source_account.set_state(&StakeStateV2::Initialized(source_meta))?; - } else { - source_stake.delegation.stake = source_final_stake; - - source_account.set_state(&StakeStateV2::Stake( - source_meta, - source_stake, - StakeFlags::empty(), - ))?; - } - - source_account.checked_sub_lamports(lamports)?; - destination_account.checked_add_lamports(lamports)?; - - // this should be impossible, but because we do all our math with delegations, best to guard it - if source_account.get_lamports() < source_meta.rent_exempt_reserve - || destination_account.get_lamports() < destination_meta.rent_exempt_reserve - { - ic_msg!( - invoke_context, - "Delegation calculations violated lamport balance assumptions" - ); - return Err(InstructionError::InvalidArgument); - } - - Ok(()) -} - -pub fn move_lamports( - invoke_context: &InvokeContext, - transaction_context: 
&TransactionContext, - instruction_context: &InstructionContext, - source_account_index: IndexOfAccount, - lamports: u64, - destination_account_index: IndexOfAccount, - stake_authority_index: IndexOfAccount, -) -> Result<(), InstructionError> { - let mut source_account = instruction_context - .try_borrow_instruction_account(transaction_context, source_account_index)?; - - let mut destination_account = instruction_context - .try_borrow_instruction_account(transaction_context, destination_account_index)?; - - let (source_merge_kind, _) = move_stake_or_lamports_shared_checks( - invoke_context, - transaction_context, - instruction_context, - &source_account, - lamports, - &destination_account, - stake_authority_index, - )?; - - let source_free_lamports = match source_merge_kind { - MergeKind::FullyActive(source_meta, source_stake) => source_account - .get_lamports() - .saturating_sub(source_stake.delegation.stake) - .saturating_sub(source_meta.rent_exempt_reserve), - MergeKind::Inactive(source_meta, source_lamports, _) => { - source_lamports.saturating_sub(source_meta.rent_exempt_reserve) - } - _ => return Err(InstructionError::InvalidAccountData), - }; - - if lamports > source_free_lamports { - return Err(InstructionError::InvalidArgument); - } - - source_account.checked_sub_lamports(lamports)?; - destination_account.checked_add_lamports(lamports)?; - - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -pub fn withdraw( - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - stake_account_index: IndexOfAccount, - lamports: u64, - to_index: IndexOfAccount, - clock: &Clock, - stake_history: &StakeHistory, - withdraw_authority_index: IndexOfAccount, - custodian_index: Option, - new_rate_activation_epoch: Option, -) -> Result<(), InstructionError> { - let withdraw_authority_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(withdraw_authority_index)?, - )?; - 
if !instruction_context.is_instruction_account_signer(withdraw_authority_index)? { - return Err(InstructionError::MissingRequiredSignature); - } - let mut signers = HashSet::new(); - signers.insert(*withdraw_authority_pubkey); - - let mut stake_account = instruction_context - .try_borrow_instruction_account(transaction_context, stake_account_index)?; - let (lockup, reserve, is_staked) = match stake_account.get_state()? { - StakeStateV2::Stake(meta, stake, _stake_flag) => { - meta.authorized - .check(&signers, StakeAuthorize::Withdrawer)?; - // if we have a deactivation epoch and we're in cooldown - let staked = if clock.epoch >= stake.delegation.deactivation_epoch { - stake - .delegation - .stake(clock.epoch, stake_history, new_rate_activation_epoch) - } else { - // Assume full stake if the stake account hasn't been - // de-activated, because in the future the exposed stake - // might be higher than stake.stake() due to warmup - stake.delegation.stake - }; - - let staked_and_reserve = checked_add(staked, meta.rent_exempt_reserve)?; - (meta.lockup, staked_and_reserve, staked != 0) - } - StakeStateV2::Initialized(meta) => { - meta.authorized - .check(&signers, StakeAuthorize::Withdrawer)?; - // stake accounts must have a balance >= rent_exempt_reserve - (meta.lockup, meta.rent_exempt_reserve, false) - } - StakeStateV2::Uninitialized => { - if !signers.contains(stake_account.get_key()) { - return Err(InstructionError::MissingRequiredSignature); - } - (Lockup::default(), 0, false) // no lockup, no restrictions - } - _ => return Err(InstructionError::InvalidAccountData), - }; - - // verify that lockup has expired or that the withdrawal is signed by - // the custodian, both epoch and unix_timestamp must have passed - let custodian_pubkey = if let Some(custodian_index) = custodian_index { - if instruction_context.is_instruction_account_signer(custodian_index)? 
{ - Some( - transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(custodian_index)?, - )?, - ) - } else { - None - } - } else { - None - }; - if lockup.is_in_force(clock, custodian_pubkey) { - return Err(StakeError::LockupInForce.into()); - } - - if lamports == stake_account.get_lamports() { - // if the stake is active, we mustn't allow the account to go away - if is_staked { - return Err(InstructionError::InsufficientFunds); - } - - // Deinitialize state upon zero balance - stake_account.set_state(&StakeStateV2::Uninitialized)?; - } else { - // Don't allow withdrawing the reserved rent balance or active stake - let lamports_and_reserve = checked_add(lamports, reserve)?; - if lamports_and_reserve > stake_account.get_lamports() { - return Err(InstructionError::InsufficientFunds); - } - } - - stake_account.checked_sub_lamports(lamports)?; - drop(stake_account); - let mut to = - instruction_context.try_borrow_instruction_account(transaction_context, to_index)?; - to.checked_add_lamports(lamports)?; - Ok(()) -} - -pub(crate) fn deactivate_delinquent( - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - stake_account: &mut BorrowedAccount, - delinquent_vote_account_index: IndexOfAccount, - reference_vote_account_index: IndexOfAccount, - current_epoch: Epoch, -) -> Result<(), InstructionError> { - let delinquent_vote_account_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(delinquent_vote_account_index)?, - )?; - let delinquent_vote_account = instruction_context - .try_borrow_instruction_account(transaction_context, delinquent_vote_account_index)?; - if *delinquent_vote_account.get_owner() != solana_sdk_ids::vote::id() { - return Err(InstructionError::IncorrectProgramId); - } - let delinquent_vote_state = delinquent_vote_account - .get_state::()? 
- .convert_to_current(); - - let reference_vote_account = instruction_context - .try_borrow_instruction_account(transaction_context, reference_vote_account_index)?; - if *reference_vote_account.get_owner() != solana_sdk_ids::vote::id() { - return Err(InstructionError::IncorrectProgramId); - } - let reference_vote_state = reference_vote_account - .get_state::()? - .convert_to_current(); - - if !acceptable_reference_epoch_credits(&reference_vote_state.epoch_credits, current_epoch) { - return Err(StakeError::InsufficientReferenceVotes.into()); - } - - if let StakeStateV2::Stake(meta, mut stake, stake_flags) = stake_account.get_state()? { - if stake.delegation.voter_pubkey != *delinquent_vote_account_pubkey { - return Err(StakeError::VoteAddressMismatch.into()); - } - - // Deactivate the stake account if its delegated vote account has never voted or has not - // voted in the last `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION` - if eligible_for_deactivate_delinquent(&delinquent_vote_state.epoch_credits, current_epoch) { - stake.deactivate(current_epoch)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) - } else { - Err(StakeError::MinimumDelinquentEpochsForDeactivationNotMet.into()) - } - } else { - Err(InstructionError::InvalidAccountData) - } -} - -/// After calling `validate_delegated_amount()`, this struct contains calculated values that are used -/// by the caller. -struct ValidatedDelegatedInfo { - stake_amount: u64, -} - -/// Ensure the stake delegation amount is valid. This checks that the account meets the minimum -/// balance requirements of delegated stake. If not, return an error. 
-fn validate_delegated_amount( - account: &BorrowedAccount, - meta: &Meta, - invoke_context: &InvokeContext, -) -> Result { - let stake_amount = account - .get_lamports() - .saturating_sub(meta.rent_exempt_reserve); // can't stake the rent - - // Stake accounts may be initialized with a stake amount below the minimum delegation so check - // that the minimum is met before delegation. - if stake_amount - < crate::get_minimum_delegation( - invoke_context.is_stake_raise_minimum_delegation_to_1_sol_active(), - ) - { - return Err(StakeError::InsufficientDelegation.into()); - } - Ok(ValidatedDelegatedInfo { stake_amount }) -} - -/// After calling `validate_split_amount()`, this struct contains calculated values that are used -/// by the caller. -#[derive(Copy, Clone, Debug, Default)] -struct ValidatedSplitInfo { - source_remaining_balance: u64, - destination_rent_exempt_reserve: u64, -} - -/// Ensure the split amount is valid. This checks the source and destination accounts meet the -/// minimum balance requirements, which is the rent exempt reserve plus the minimum stake -/// delegation, and that the source account has enough lamports for the request split amount. If -/// not, return an error. 
-fn validate_split_amount( - invoke_context: &InvokeContext, - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - source_account_index: IndexOfAccount, - destination_account_index: IndexOfAccount, - lamports: u64, - source_meta: &Meta, - additional_required_lamports: u64, - source_is_active: bool, -) -> Result { - let source_account = instruction_context - .try_borrow_instruction_account(transaction_context, source_account_index)?; - let source_lamports = source_account.get_lamports(); - drop(source_account); - let destination_account = instruction_context - .try_borrow_instruction_account(transaction_context, destination_account_index)?; - let destination_lamports = destination_account.get_lamports(); - let destination_data_len = destination_account.get_data().len(); - drop(destination_account); - - // Split amount has to be something - if lamports == 0 { - return Err(InstructionError::InsufficientFunds); - } - - // Obviously cannot split more than what the source account has - if lamports > source_lamports { - return Err(InstructionError::InsufficientFunds); - } - - // Verify that the source account still has enough lamports left after splitting: - // EITHER at least the minimum balance, OR zero (in this case the source - // account is transferring all lamports to new destination account, and the source - // account will be closed) - let source_minimum_balance = source_meta - .rent_exempt_reserve - .saturating_add(additional_required_lamports); - let source_remaining_balance = source_lamports.saturating_sub(lamports); - if source_remaining_balance == 0 { - // full amount is a withdrawal - // nothing to do here - } else if source_remaining_balance < source_minimum_balance { - // the remaining balance is too low to do the split - return Err(InstructionError::InsufficientFunds); - } else { - // all clear! 
- // nothing to do here - } - - let rent = invoke_context.get_sysvar_cache().get_rent()?; - let destination_rent_exempt_reserve = rent.minimum_balance(destination_data_len); - - // If the source is active stake, one of these criteria must be met: - // 1. the destination account must be prefunded with at least the rent-exempt reserve, or - // 2. the split must consume 100% of the source - if source_is_active - && source_remaining_balance != 0 - && destination_lamports < destination_rent_exempt_reserve - { - return Err(InstructionError::InsufficientFunds); - } - - // Verify the destination account meets the minimum balance requirements - // This must handle: - // 1. The destination account having a different rent exempt reserve due to data size changes - // 2. The destination account being prefunded, which would lower the minimum split amount - let destination_minimum_balance = - destination_rent_exempt_reserve.saturating_add(additional_required_lamports); - let destination_balance_deficit = - destination_minimum_balance.saturating_sub(destination_lamports); - if lamports < destination_balance_deficit { - return Err(InstructionError::InsufficientFunds); - } - - Ok(ValidatedSplitInfo { - source_remaining_balance, - destination_rent_exempt_reserve, - }) -} - -#[derive(Clone, Debug, PartialEq)] -enum MergeKind { - Inactive(Meta, u64, StakeFlags), - ActivationEpoch(Meta, Stake, StakeFlags), - FullyActive(Meta, Stake), -} - -impl MergeKind { - fn meta(&self) -> &Meta { - match self { - Self::Inactive(meta, _, _) => meta, - Self::ActivationEpoch(meta, _, _) => meta, - Self::FullyActive(meta, _) => meta, - } - } - - fn active_stake(&self) -> Option<&Stake> { - match self { - Self::Inactive(_, _, _) => None, - Self::ActivationEpoch(_, stake, _) => Some(stake), - Self::FullyActive(_, stake) => Some(stake), - } - } - - fn get_if_mergeable( - invoke_context: &InvokeContext, - stake_state: &StakeStateV2, - stake_lamports: u64, - clock: &Clock, - stake_history: &StakeHistory, - ) 
-> Result { - match stake_state { - StakeStateV2::Stake(meta, stake, stake_flags) => { - // stake must not be in a transient state. Transient here meaning - // activating or deactivating with non-zero effective stake. - let status = stake.delegation.stake_activating_and_deactivating( - clock.epoch, - stake_history, - new_warmup_cooldown_rate_epoch(), - ); - - match (status.effective, status.activating, status.deactivating) { - (0, 0, 0) => Ok(Self::Inactive(*meta, stake_lamports, *stake_flags)), - (0, _, _) => Ok(Self::ActivationEpoch(*meta, *stake, *stake_flags)), - (_, 0, 0) => Ok(Self::FullyActive(*meta, *stake)), - _ => { - let err = StakeError::MergeTransientStake; - ic_msg!(invoke_context, "{}", err); - Err(err.into()) - } - } - } - StakeStateV2::Initialized(meta) => { - Ok(Self::Inactive(*meta, stake_lamports, StakeFlags::empty())) - } - _ => Err(InstructionError::InvalidAccountData), - } - } - - fn metas_can_merge( - invoke_context: &InvokeContext, - stake: &Meta, - source: &Meta, - clock: &Clock, - ) -> Result<(), InstructionError> { - // lockups may mismatch so long as both have expired - let can_merge_lockups = stake.lockup == source.lockup - || (!stake.lockup.is_in_force(clock, None) && !source.lockup.is_in_force(clock, None)); - // `rent_exempt_reserve` has no bearing on the mergeability of accounts, - // as the source account will be culled by runtime once the operation - // succeeds. 
Considering it here would needlessly prevent merging stake - // accounts with differing data lengths, which already exist in the wild - // due to an SDK bug - if stake.authorized == source.authorized && can_merge_lockups { - Ok(()) - } else { - ic_msg!(invoke_context, "Unable to merge due to metadata mismatch"); - Err(StakeError::MergeMismatch.into()) - } - } - - fn active_delegations_can_merge( - invoke_context: &InvokeContext, - stake: &Delegation, - source: &Delegation, - ) -> Result<(), InstructionError> { - if stake.voter_pubkey != source.voter_pubkey { - ic_msg!(invoke_context, "Unable to merge due to voter mismatch"); - Err(StakeError::MergeMismatch.into()) - } else if stake.deactivation_epoch == Epoch::MAX && source.deactivation_epoch == Epoch::MAX - { - Ok(()) - } else { - ic_msg!(invoke_context, "Unable to merge due to stake deactivation"); - Err(StakeError::MergeMismatch.into()) - } - } - - fn merge( - self, - invoke_context: &InvokeContext, - source: Self, - clock: &Clock, - ) -> Result, InstructionError> { - Self::metas_can_merge(invoke_context, self.meta(), source.meta(), clock)?; - self.active_stake() - .zip(source.active_stake()) - .map(|(stake, source)| { - Self::active_delegations_can_merge( - invoke_context, - &stake.delegation, - &source.delegation, - ) - }) - .unwrap_or(Ok(()))?; - let merged_state = match (self, source) { - (Self::Inactive(_, _, _), Self::Inactive(_, _, _)) => None, - (Self::Inactive(_, _, _), Self::ActivationEpoch(_, _, _)) => None, - ( - Self::ActivationEpoch(meta, mut stake, stake_flags), - Self::Inactive(_, source_lamports, source_stake_flags), - ) => { - stake.delegation.stake = checked_add(stake.delegation.stake, source_lamports)?; - Some(StakeStateV2::Stake( - meta, - stake, - stake_flags.union(source_stake_flags), - )) - } - ( - Self::ActivationEpoch(meta, mut stake, stake_flags), - Self::ActivationEpoch(source_meta, source_stake, source_stake_flags), - ) => { - let source_lamports = checked_add( - 
source_meta.rent_exempt_reserve, - source_stake.delegation.stake, - )?; - merge_delegation_stake_and_credits_observed( - &mut stake, - source_lamports, - source_stake.credits_observed, - )?; - Some(StakeStateV2::Stake( - meta, - stake, - stake_flags.union(source_stake_flags), - )) - } - (Self::FullyActive(meta, mut stake), Self::FullyActive(_, source_stake)) => { - // Don't stake the source account's `rent_exempt_reserve` to - // protect against the magic activation loophole. It will - // instead be moved into the destination account as extra, - // withdrawable `lamports` - merge_delegation_stake_and_credits_observed( - &mut stake, - source_stake.delegation.stake, - source_stake.credits_observed, - )?; - Some(StakeStateV2::Stake(meta, stake, StakeFlags::empty())) - } - _ => return Err(StakeError::MergeMismatch.into()), - }; - Ok(merged_state) - } -} - -fn merge_delegation_stake_and_credits_observed( - stake: &mut Stake, - absorbed_lamports: u64, - absorbed_credits_observed: u64, -) -> Result<(), InstructionError> { - stake.credits_observed = - stake_weighted_credits_observed(stake, absorbed_lamports, absorbed_credits_observed) - .ok_or(InstructionError::ArithmeticOverflow)?; - stake.delegation.stake = checked_add(stake.delegation.stake, absorbed_lamports)?; - Ok(()) -} - -/// Calculate the effective credits observed for two stakes when merging -/// -/// When merging two `ActivationEpoch` or `FullyActive` stakes, the credits -/// observed of the merged stake is the weighted average of the two stakes' -/// credits observed. -/// -/// This is because we can derive the effective credits_observed by reversing the staking -/// rewards equation, _while keeping the rewards unchanged after merge (i.e. 
strong -/// requirement)_, like below: -/// -/// a(N) => account, r => rewards, s => stake, c => credits: -/// assume: -/// a3 = merge(a1, a2) -/// then: -/// a3.s = a1.s + a2.s -/// -/// Next, given: -/// aN.r = aN.c * aN.s (for every N) -/// finally: -/// a3.r = a1.r + a2.r -/// a3.c * a3.s = a1.c * a1.s + a2.c * a2.s -/// a3.c = (a1.c * a1.s + a2.c * a2.s) / (a1.s + a2.s) // QED -/// -/// (For this discussion, we omitted irrelevant variables, including distance -/// calculation against vote_account and point indirection.) -fn stake_weighted_credits_observed( - stake: &Stake, - absorbed_lamports: u64, - absorbed_credits_observed: u64, -) -> Option { - if stake.credits_observed == absorbed_credits_observed { - Some(stake.credits_observed) - } else { - let total_stake = u128::from(stake.delegation.stake.checked_add(absorbed_lamports)?); - let stake_weighted_credits = - u128::from(stake.credits_observed).checked_mul(u128::from(stake.delegation.stake))?; - let absorbed_weighted_credits = - u128::from(absorbed_credits_observed).checked_mul(u128::from(absorbed_lamports))?; - // Discard fractional credits as a merge side-effect friction by taking - // the ceiling, done by adding `denominator - 1` to the numerator. - let total_weighted_credits = stake_weighted_credits - .checked_add(absorbed_weighted_credits)? - .checked_add(total_stake)? 
- .checked_sub(1)?; - u64::try_from(total_weighted_credits.checked_div(total_stake)?).ok() - } -} - -pub type RewriteStakeStatus = (&'static str, (u64, u64), (u64, u64)); - -// utility function, used by runtime::Stakes, tests -pub fn new_stake_history_entry<'a, I>( - epoch: Epoch, - stakes: I, - history: &StakeHistory, - new_rate_activation_epoch: Option, -) -> StakeHistoryEntry -where - I: Iterator, -{ - stakes.fold(StakeHistoryEntry::default(), |sum, stake| { - sum + stake.stake_activating_and_deactivating(epoch, history, new_rate_activation_epoch) - }) -} - -// utility function, used by tests -pub fn create_stake_history_from_delegations( - bootstrap: Option, - epochs: std::ops::Range, - delegations: &[Delegation], - new_rate_activation_epoch: Option, -) -> StakeHistory { - let mut stake_history = StakeHistory::default(); - - let bootstrap_delegation = if let Some(bootstrap) = bootstrap { - vec![Delegation { - activation_epoch: u64::MAX, - stake: bootstrap, - ..Delegation::default() - }] - } else { - vec![] - }; - - for epoch in epochs { - let entry = new_stake_history_entry( - epoch, - delegations.iter().chain(bootstrap_delegation.iter()), - &stake_history, - new_rate_activation_epoch, - ); - stake_history.add(epoch, entry); - } - - stake_history -} - // genesis investor accounts pub fn create_lockup_stake_account( authorized: &Authorized, @@ -1391,25 +96,6 @@ pub fn create_account( ) } -// utility function, used by tests -pub fn create_account_with_activation_epoch( - authorized: &Pubkey, - voter_pubkey: &Pubkey, - vote_account: &AccountSharedData, - rent: &Rent, - lamports: u64, - activation_epoch: Epoch, -) -> AccountSharedData { - do_create_account( - authorized, - voter_pubkey, - vote_account, - rent, - lamports, - activation_epoch, - ) -} - fn do_create_account( authorized: &Pubkey, voter_pubkey: &Pubkey, @@ -1420,7 +106,7 @@ fn do_create_account( ) -> AccountSharedData { let mut stake_account = AccountSharedData::new(lamports, StakeStateV2::size_of(), 
&id()); - let vote_state = VoteState::deserialize(vote_account.data()).expect("vote_state"); + let vote_state = VoteStateV3::deserialize(vote_account.data()).expect("vote_state"); let rent_exempt_reserve = rent.minimum_balance(stake_account.data().len()); @@ -1443,1528 +129,3 @@ fn do_create_account( stake_account } - -#[cfg(test)] -mod tests { - use { - super::*, - proptest::prelude::*, - solana_account::{create_account_shared_data_for_test, AccountSharedData}, - solana_epoch_schedule::EpochSchedule, - solana_program_runtime::with_mock_invoke_context, - solana_pubkey::Pubkey, - solana_sdk_ids::sysvar::epoch_schedule, - solana_stake_interface::state::warmup_cooldown_rate, - solana_sysvar_id::SysvarId, - test_case::test_case, - }; - - #[test] - fn test_authorized_authorize() { - let staker = solana_pubkey::new_rand(); - let mut authorized = Authorized::auto(&staker); - let mut signers = HashSet::new(); - assert_eq!( - authorized.authorize(&signers, &staker, StakeAuthorize::Staker, None), - Err(InstructionError::MissingRequiredSignature) - ); - signers.insert(staker); - assert_eq!( - authorized.authorize(&signers, &staker, StakeAuthorize::Staker, None), - Ok(()) - ); - } - - #[test] - fn test_authorized_authorize_with_custodian() { - let staker = solana_pubkey::new_rand(); - let custodian = solana_pubkey::new_rand(); - let invalid_custodian = solana_pubkey::new_rand(); - let mut authorized = Authorized::auto(&staker); - let mut signers = HashSet::new(); - signers.insert(staker); - - let lockup = Lockup { - epoch: 1, - unix_timestamp: 1, - custodian, - }; - let clock = Clock { - epoch: 0, - unix_timestamp: 0, - ..Clock::default() - }; - - // No lockup, no custodian - assert_eq!( - authorized.authorize( - &signers, - &staker, - StakeAuthorize::Withdrawer, - Some((&Lockup::default(), &clock, None)) - ), - Ok(()) - ); - - // No lockup, invalid custodian not a signer - assert_eq!( - authorized.authorize( - &signers, - &staker, - StakeAuthorize::Withdrawer, - 
Some((&Lockup::default(), &clock, Some(&invalid_custodian))) - ), - Ok(()) // <== invalid custodian doesn't matter, there's no lockup - ); - - // Lockup active, invalid custodian not a signer - assert_eq!( - authorized.authorize( - &signers, - &staker, - StakeAuthorize::Withdrawer, - Some((&lockup, &clock, Some(&invalid_custodian))) - ), - Err(StakeError::CustodianSignatureMissing.into()), - ); - - signers.insert(invalid_custodian); - - // No lockup, invalid custodian is a signer - assert_eq!( - authorized.authorize( - &signers, - &staker, - StakeAuthorize::Withdrawer, - Some((&Lockup::default(), &clock, Some(&invalid_custodian))) - ), - Ok(()) // <== invalid custodian doesn't matter, there's no lockup - ); - - // Lockup active, invalid custodian is a signer - signers.insert(invalid_custodian); - assert_eq!( - authorized.authorize( - &signers, - &staker, - StakeAuthorize::Withdrawer, - Some((&lockup, &clock, Some(&invalid_custodian))) - ), - Err(StakeError::LockupInForce.into()), // <== invalid custodian rejected - ); - - signers.remove(&invalid_custodian); - - // Lockup active, no custodian - assert_eq!( - authorized.authorize( - &signers, - &staker, - StakeAuthorize::Withdrawer, - Some((&lockup, &clock, None)) - ), - Err(StakeError::CustodianMissing.into()), - ); - - // Lockup active, custodian not a signer - assert_eq!( - authorized.authorize( - &signers, - &staker, - StakeAuthorize::Withdrawer, - Some((&lockup, &clock, Some(&custodian))) - ), - Err(StakeError::CustodianSignatureMissing.into()), - ); - - // Lockup active, custodian is a signer - signers.insert(custodian); - assert_eq!( - authorized.authorize( - &signers, - &staker, - StakeAuthorize::Withdrawer, - Some((&lockup, &clock, Some(&custodian))) - ), - Ok(()) - ); - } - - #[test] - fn test_stake_state_stake_from_fail() { - let mut stake_account = AccountSharedData::new(0, StakeStateV2::size_of(), &id()); - - stake_account - .set_state(&StakeStateV2::default()) - .expect("set_state"); - - 
assert_eq!(stake_from(&stake_account), None); - } - - #[test] - fn test_stake_is_bootstrap() { - assert!(Delegation { - activation_epoch: u64::MAX, - ..Delegation::default() - } - .is_bootstrap()); - assert!(!Delegation { - activation_epoch: 0, - ..Delegation::default() - } - .is_bootstrap()); - } - - #[test] - fn test_stake_activating_and_deactivating() { - let stake = Delegation { - stake: 1_000, - activation_epoch: 0, // activating at zero - deactivation_epoch: 5, - ..Delegation::default() - }; - - // save this off so stake.config.warmup_rate changes don't break this test - let increment = (1_000_f64 * warmup_cooldown_rate(0, None)) as u64; - - let mut stake_history = StakeHistory::default(); - // assert that this stake follows step function if there's no history - assert_eq!( - stake.stake_activating_and_deactivating(stake.activation_epoch, &stake_history, None), - StakeActivationStatus::with_effective_and_activating(0, stake.stake), - ); - for epoch in stake.activation_epoch + 1..stake.deactivation_epoch { - assert_eq!( - stake.stake_activating_and_deactivating(epoch, &stake_history, None), - StakeActivationStatus::with_effective(stake.stake), - ); - } - // assert that this stake is full deactivating - assert_eq!( - stake.stake_activating_and_deactivating(stake.deactivation_epoch, &stake_history, None), - StakeActivationStatus::with_deactivating(stake.stake), - ); - // assert that this stake is fully deactivated if there's no history - assert_eq!( - stake.stake_activating_and_deactivating( - stake.deactivation_epoch + 1, - &stake_history, - None - ), - StakeActivationStatus::default(), - ); - - stake_history.add( - 0u64, // entry for zero doesn't have my activating amount - StakeHistoryEntry { - effective: 1_000, - ..StakeHistoryEntry::default() - }, - ); - // assert that this stake is broken, because above setup is broken - assert_eq!( - stake.stake_activating_and_deactivating(1, &stake_history, None), - StakeActivationStatus::with_effective_and_activating(0, 
stake.stake), - ); - - stake_history.add( - 0u64, // entry for zero has my activating amount - StakeHistoryEntry { - effective: 1_000, - activating: 1_000, - ..StakeHistoryEntry::default() - }, - // no entry for 1, so this stake gets shorted - ); - // assert that this stake is broken, because above setup is broken - assert_eq!( - stake.stake_activating_and_deactivating(2, &stake_history, None), - StakeActivationStatus::with_effective_and_activating( - increment, - stake.stake - increment - ), - ); - - // start over, test deactivation edge cases - let mut stake_history = StakeHistory::default(); - - stake_history.add( - stake.deactivation_epoch, // entry for zero doesn't have my de-activating amount - StakeHistoryEntry { - effective: 1_000, - ..StakeHistoryEntry::default() - }, - ); - // assert that this stake is broken, because above setup is broken - assert_eq!( - stake.stake_activating_and_deactivating( - stake.deactivation_epoch + 1, - &stake_history, - None, - ), - StakeActivationStatus::with_deactivating(stake.stake), - ); - - // put in my initial deactivating amount, but don't put in an entry for next - stake_history.add( - stake.deactivation_epoch, // entry for zero has my de-activating amount - StakeHistoryEntry { - effective: 1_000, - deactivating: 1_000, - ..StakeHistoryEntry::default() - }, - ); - // assert that this stake is broken, because above setup is broken - assert_eq!( - stake.stake_activating_and_deactivating( - stake.deactivation_epoch + 2, - &stake_history, - None, - ), - // hung, should be lower - StakeActivationStatus::with_deactivating(stake.stake - increment), - ); - } - - mod same_epoch_activation_then_deactivation { - use super::*; - - enum OldDeactivationBehavior { - Stuck, - Slow, - } - - fn do_test( - old_behavior: OldDeactivationBehavior, - expected_stakes: &[StakeActivationStatus], - ) { - let cluster_stake = 1_000; - let activating_stake = 10_000; - let some_stake = 700; - let some_epoch = 0; - - let stake = Delegation { - stake: 
some_stake, - activation_epoch: some_epoch, - deactivation_epoch: some_epoch, - ..Delegation::default() - }; - - let mut stake_history = StakeHistory::default(); - let cluster_deactivation_at_stake_modified_epoch = match old_behavior { - OldDeactivationBehavior::Stuck => 0, - OldDeactivationBehavior::Slow => 1000, - }; - - let stake_history_entries = vec![ - ( - cluster_stake, - activating_stake, - cluster_deactivation_at_stake_modified_epoch, - ), - (cluster_stake, activating_stake, 1000), - (cluster_stake, activating_stake, 1000), - (cluster_stake, activating_stake, 100), - (cluster_stake, activating_stake, 100), - (cluster_stake, activating_stake, 100), - (cluster_stake, activating_stake, 100), - ]; - - for (epoch, (effective, activating, deactivating)) in - stake_history_entries.into_iter().enumerate() - { - stake_history.add( - epoch as Epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - } - - assert_eq!( - expected_stakes, - (0..expected_stakes.len()) - .map(|epoch| stake.stake_activating_and_deactivating( - epoch as u64, - &stake_history, - None, - )) - .collect::>() - ); - } - - #[test] - fn test_new_behavior_previously_slow() { - // any stake accounts activated and deactivated at the same epoch - // shouldn't been activated (then deactivated) at all! - - do_test( - OldDeactivationBehavior::Slow, - &[ - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - ], - ); - } - - #[test] - fn test_new_behavior_previously_stuck() { - // any stake accounts activated and deactivated at the same epoch - // shouldn't been activated (then deactivated) at all! 
- - do_test( - OldDeactivationBehavior::Stuck, - &[ - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - StakeActivationStatus::default(), - ], - ); - } - } - - #[test] - fn test_inflation_and_slashing_with_activating_and_deactivating_stake() { - // some really boring delegation and stake_history setup - let (delegated_stake, mut stake, stake_history) = { - let cluster_stake = 1_000; - let delegated_stake = 700; - - let stake = Delegation { - stake: delegated_stake, - activation_epoch: 0, - deactivation_epoch: 4, - ..Delegation::default() - }; - - let mut stake_history = StakeHistory::default(); - stake_history.add( - 0, - StakeHistoryEntry { - effective: cluster_stake, - activating: delegated_stake, - ..StakeHistoryEntry::default() - }, - ); - let newly_effective_at_epoch1 = (cluster_stake as f64 * 0.25) as u64; - assert_eq!(newly_effective_at_epoch1, 250); - stake_history.add( - 1, - StakeHistoryEntry { - effective: cluster_stake + newly_effective_at_epoch1, - activating: delegated_stake - newly_effective_at_epoch1, - ..StakeHistoryEntry::default() - }, - ); - let newly_effective_at_epoch2 = - ((cluster_stake + newly_effective_at_epoch1) as f64 * 0.25) as u64; - assert_eq!(newly_effective_at_epoch2, 312); - stake_history.add( - 2, - StakeHistoryEntry { - effective: cluster_stake - + newly_effective_at_epoch1 - + newly_effective_at_epoch2, - activating: delegated_stake - - newly_effective_at_epoch1 - - newly_effective_at_epoch2, - ..StakeHistoryEntry::default() - }, - ); - stake_history.add( - 3, - StakeHistoryEntry { - effective: cluster_stake + delegated_stake, - ..StakeHistoryEntry::default() - }, - ); - stake_history.add( - 4, - StakeHistoryEntry { - effective: cluster_stake + delegated_stake, - deactivating: delegated_stake, - ..StakeHistoryEntry::default() - }, - ); - let 
newly_not_effective_stake_at_epoch5 = - ((cluster_stake + delegated_stake) as f64 * 0.25) as u64; - assert_eq!(newly_not_effective_stake_at_epoch5, 425); - stake_history.add( - 5, - StakeHistoryEntry { - effective: cluster_stake + delegated_stake - - newly_not_effective_stake_at_epoch5, - deactivating: delegated_stake - newly_not_effective_stake_at_epoch5, - ..StakeHistoryEntry::default() - }, - ); - - (delegated_stake, stake, stake_history) - }; - - // helper closures - let calculate_each_staking_status = |stake: &Delegation, epoch_count: usize| -> Vec<_> { - (0..epoch_count) - .map(|epoch| { - stake.stake_activating_and_deactivating(epoch as u64, &stake_history, None) - }) - .collect::>() - }; - let adjust_staking_status = |rate: f64, status: &[StakeActivationStatus]| { - status - .iter() - .map(|entry| StakeActivationStatus { - effective: (entry.effective as f64 * rate) as u64, - activating: (entry.activating as f64 * rate) as u64, - deactivating: (entry.deactivating as f64 * rate) as u64, - }) - .collect::>() - }; - - let expected_staking_status_transition = vec![ - StakeActivationStatus::with_effective_and_activating(0, 700), - StakeActivationStatus::with_effective_and_activating(250, 450), - StakeActivationStatus::with_effective_and_activating(562, 138), - StakeActivationStatus::with_effective(700), - StakeActivationStatus::with_deactivating(700), - StakeActivationStatus::with_deactivating(275), - StakeActivationStatus::default(), - ]; - let expected_staking_status_transition_base = vec![ - StakeActivationStatus::with_effective_and_activating(0, 700), - StakeActivationStatus::with_effective_and_activating(250, 450), - StakeActivationStatus::with_effective_and_activating(562, 138 + 1), // +1 is needed for rounding - StakeActivationStatus::with_effective(700), - StakeActivationStatus::with_deactivating(700), - StakeActivationStatus::with_deactivating(275 + 1), // +1 is needed for rounding - StakeActivationStatus::default(), - ]; - - // normal stake activating 
and deactivating transition test, just in case - assert_eq!( - expected_staking_status_transition, - calculate_each_staking_status(&stake, expected_staking_status_transition.len()) - ); - - // 10% inflation rewards assuming some sizable epochs passed! - let rate = 1.10; - stake.stake = (delegated_stake as f64 * rate) as u64; - let expected_staking_status_transition = - adjust_staking_status(rate, &expected_staking_status_transition_base); - - assert_eq!( - expected_staking_status_transition, - calculate_each_staking_status(&stake, expected_staking_status_transition_base.len()), - ); - - // 50% slashing!!! - let rate = 0.5; - stake.stake = (delegated_stake as f64 * rate) as u64; - let expected_staking_status_transition = - adjust_staking_status(rate, &expected_staking_status_transition_base); - - assert_eq!( - expected_staking_status_transition, - calculate_each_staking_status(&stake, expected_staking_status_transition_base.len()), - ); - } - - #[test] - fn test_stop_activating_after_deactivation() { - let stake = Delegation { - stake: 1_000, - activation_epoch: 0, - deactivation_epoch: 3, - ..Delegation::default() - }; - - let base_stake = 1_000; - let mut stake_history = StakeHistory::default(); - let mut effective = base_stake; - let other_activation = 100; - let mut other_activations = vec![0]; - - // Build a stake history where the test staker always consumes all of the available warm - // up and cool down stake. However, simulate other stakers beginning to activate during - // the test staker's deactivation. 
- for epoch in 0..=stake.deactivation_epoch + 1 { - let (activating, deactivating) = if epoch < stake.deactivation_epoch { - (stake.stake + base_stake - effective, 0) - } else { - let other_activation_sum: u64 = other_activations.iter().sum(); - let deactivating = effective - base_stake - other_activation_sum; - (other_activation, deactivating) - }; - - stake_history.add( - epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - - let effective_rate_limited = (effective as f64 * warmup_cooldown_rate(0, None)) as u64; - if epoch < stake.deactivation_epoch { - effective += effective_rate_limited.min(activating); - other_activations.push(0); - } else { - effective -= effective_rate_limited.min(deactivating); - effective += other_activation; - other_activations.push(other_activation); - } - } - - for epoch in 0..=stake.deactivation_epoch + 1 { - let history = stake_history.get(epoch).unwrap(); - let other_activations: u64 = other_activations[..=epoch as usize].iter().sum(); - let expected_stake = history.effective - base_stake - other_activations; - let (expected_activating, expected_deactivating) = if epoch < stake.deactivation_epoch { - (history.activating, 0) - } else { - (0, history.deactivating) - }; - assert_eq!( - stake.stake_activating_and_deactivating(epoch, &stake_history, None), - StakeActivationStatus { - effective: expected_stake, - activating: expected_activating, - deactivating: expected_deactivating, - }, - ); - } - } - - #[test] - fn test_stake_warmup_cooldown_sub_integer_moves() { - let delegations = [Delegation { - stake: 2, - activation_epoch: 0, // activating at zero - deactivation_epoch: 5, - ..Delegation::default() - }]; - // give 2 epochs of cooldown - let epochs = 7; - // make bootstrap stake smaller than warmup so warmup/cooldownn - // increment is always smaller than 1 - let bootstrap = (warmup_cooldown_rate(0, None) * 100.0 / 2.0) as u64; - let stake_history = - 
create_stake_history_from_delegations(Some(bootstrap), 0..epochs, &delegations, None); - let mut max_stake = 0; - let mut min_stake = 2; - - for epoch in 0..epochs { - let stake = delegations - .iter() - .map(|delegation| delegation.stake(epoch, &stake_history, None)) - .sum::(); - max_stake = max_stake.max(stake); - min_stake = min_stake.min(stake); - } - assert_eq!(max_stake, 2); - assert_eq!(min_stake, 0); - } - - #[test_case(None ; "old rate")] - #[test_case(Some(1) ; "new rate activated in epoch 1")] - #[test_case(Some(10) ; "new rate activated in epoch 10")] - #[test_case(Some(30) ; "new rate activated in epoch 30")] - #[test_case(Some(50) ; "new rate activated in epoch 50")] - #[test_case(Some(60) ; "new rate activated in epoch 60")] - fn test_stake_warmup_cooldown(new_rate_activation_epoch: Option) { - let delegations = [ - Delegation { - // never deactivates - stake: 1_000, - activation_epoch: u64::MAX, - ..Delegation::default() - }, - Delegation { - stake: 1_000, - activation_epoch: 0, - deactivation_epoch: 9, - ..Delegation::default() - }, - Delegation { - stake: 1_000, - activation_epoch: 1, - deactivation_epoch: 6, - ..Delegation::default() - }, - Delegation { - stake: 1_000, - activation_epoch: 2, - deactivation_epoch: 5, - ..Delegation::default() - }, - Delegation { - stake: 1_000, - activation_epoch: 2, - deactivation_epoch: 4, - ..Delegation::default() - }, - Delegation { - stake: 1_000, - activation_epoch: 4, - deactivation_epoch: 4, - ..Delegation::default() - }, - ]; - // chosen to ensure that the last activated stake (at 4) finishes - // warming up and cooling down - // a stake takes 2.0f64.log(1.0 + STAKE_WARMUP_RATE) epochs to warm up or cool down - // when all alone, but the above overlap a lot - let epochs = 60; - - let stake_history = create_stake_history_from_delegations( - None, - 0..epochs, - &delegations, - new_rate_activation_epoch, - ); - - let mut prev_total_effective_stake = delegations - .iter() - .map(|delegation| 
delegation.stake(0, &stake_history, new_rate_activation_epoch)) - .sum::(); - - // uncomment and add ! for fun with graphing - // eprintln("\n{:8} {:8} {:8}", " epoch", " total", " delta"); - for epoch in 1..epochs { - let total_effective_stake = delegations - .iter() - .map(|delegation| { - delegation.stake(epoch, &stake_history, new_rate_activation_epoch) - }) - .sum::(); - - let delta = total_effective_stake.abs_diff(prev_total_effective_stake); - - // uncomment and add ! for fun with graphing - // eprint("{:8} {:8} {:8} ", epoch, total_effective_stake, delta); - // (0..(total_effective_stake as usize / (delegations.len() * 5))).for_each(|_| eprint("#")); - // eprintln(); - - assert!( - delta - <= ((prev_total_effective_stake as f64 - * warmup_cooldown_rate(epoch, new_rate_activation_epoch)) - as u64) - .max(1) - ); - - prev_total_effective_stake = total_effective_stake; - } - } - - #[test] - fn test_lockup_is_expired() { - let custodian = solana_pubkey::new_rand(); - let lockup = Lockup { - epoch: 1, - unix_timestamp: 1, - custodian, - }; - // neither time - assert!(lockup.is_in_force( - &Clock { - epoch: 0, - unix_timestamp: 0, - ..Clock::default() - }, - None - )); - // not timestamp - assert!(lockup.is_in_force( - &Clock { - epoch: 2, - unix_timestamp: 0, - ..Clock::default() - }, - None - )); - // not epoch - assert!(lockup.is_in_force( - &Clock { - epoch: 0, - unix_timestamp: 2, - ..Clock::default() - }, - None - )); - // both, no custodian - assert!(!lockup.is_in_force( - &Clock { - epoch: 1, - unix_timestamp: 1, - ..Clock::default() - }, - None - )); - // neither, but custodian - assert!(!lockup.is_in_force( - &Clock { - epoch: 0, - unix_timestamp: 0, - ..Clock::default() - }, - Some(&custodian), - )); - } - - #[test] - #[ignore] - #[should_panic] - fn test_dbg_stake_minimum_balance() { - let minimum_balance = Rent::default().minimum_balance(StakeStateV2::size_of()); - panic!( - "stake minimum_balance: {} lamports, {} SOL", - minimum_balance, - 
minimum_balance as f64 / solana_native_token::LAMPORTS_PER_SOL as f64 - ); - } - - #[test] - fn test_things_can_merge() { - with_mock_invoke_context!(invoke_context, transaction_context, Vec::new()); - let good_stake = Stake { - credits_observed: 4242, - delegation: Delegation { - voter_pubkey: Pubkey::new_unique(), - stake: 424242424242, - activation_epoch: 42, - ..Delegation::default() - }, - }; - - let identical = good_stake; - assert!(MergeKind::active_delegations_can_merge( - &invoke_context, - &good_stake.delegation, - &identical.delegation - ) - .is_ok()); - - let good_delegation = good_stake.delegation; - let different_stake_ok = Delegation { - stake: good_delegation.stake + 1, - ..good_delegation - }; - assert!(MergeKind::active_delegations_can_merge( - &invoke_context, - &good_delegation, - &different_stake_ok - ) - .is_ok()); - - let different_activation_epoch_ok = Delegation { - activation_epoch: good_delegation.activation_epoch + 1, - ..good_delegation - }; - assert!(MergeKind::active_delegations_can_merge( - &invoke_context, - &good_delegation, - &different_activation_epoch_ok - ) - .is_ok()); - - let bad_voter = Delegation { - voter_pubkey: Pubkey::new_unique(), - ..good_delegation - }; - assert!(MergeKind::active_delegations_can_merge( - &invoke_context, - &good_delegation, - &bad_voter - ) - .is_err()); - - let bad_deactivation_epoch = Delegation { - deactivation_epoch: 43, - ..good_delegation - }; - assert!(MergeKind::active_delegations_can_merge( - &invoke_context, - &good_delegation, - &bad_deactivation_epoch - ) - .is_err()); - assert!(MergeKind::active_delegations_can_merge( - &invoke_context, - &bad_deactivation_epoch, - &good_delegation - ) - .is_err()); - } - - #[test] - fn test_metas_can_merge() { - with_mock_invoke_context!(invoke_context, transaction_context, Vec::new()); - // Identical Metas can merge - assert!(MergeKind::metas_can_merge( - &invoke_context, - &Meta::default(), - &Meta::default(), - &Clock::default() - ) - .is_ok()); - - 
let mismatched_rent_exempt_reserve_ok = Meta { - rent_exempt_reserve: 42, - ..Meta::default() - }; - assert_ne!( - mismatched_rent_exempt_reserve_ok.rent_exempt_reserve, - Meta::default().rent_exempt_reserve, - ); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &Meta::default(), - &mismatched_rent_exempt_reserve_ok, - &Clock::default() - ) - .is_ok()); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &mismatched_rent_exempt_reserve_ok, - &Meta::default(), - &Clock::default() - ) - .is_ok()); - - let mismatched_authorized_fails = Meta { - authorized: Authorized { - staker: Pubkey::new_unique(), - withdrawer: Pubkey::new_unique(), - }, - ..Meta::default() - }; - assert_ne!( - mismatched_authorized_fails.authorized, - Meta::default().authorized, - ); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &Meta::default(), - &mismatched_authorized_fails, - &Clock::default() - ) - .is_err()); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &mismatched_authorized_fails, - &Meta::default(), - &Clock::default() - ) - .is_err()); - - let lockup1_timestamp = 42; - let lockup2_timestamp = 4242; - let lockup1_epoch = 4; - let lockup2_epoch = 42; - let metas_with_lockup1 = Meta { - lockup: Lockup { - unix_timestamp: lockup1_timestamp, - epoch: lockup1_epoch, - custodian: Pubkey::new_unique(), - }, - ..Meta::default() - }; - let metas_with_lockup2 = Meta { - lockup: Lockup { - unix_timestamp: lockup2_timestamp, - epoch: lockup2_epoch, - custodian: Pubkey::new_unique(), - }, - ..Meta::default() - }; - - // Mismatched lockups fail when both in force - assert_ne!(metas_with_lockup1.lockup, Meta::default().lockup); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &metas_with_lockup1, - &metas_with_lockup2, - &Clock::default() - ) - .is_err()); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &metas_with_lockup2, - &metas_with_lockup1, - &Clock::default() - ) - .is_err()); - - let clock = Clock { - epoch: lockup1_epoch + 1, - 
unix_timestamp: lockup1_timestamp + 1, - ..Clock::default() - }; - - // Mismatched lockups fail when either in force - assert_ne!(metas_with_lockup1.lockup, Meta::default().lockup); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &metas_with_lockup1, - &metas_with_lockup2, - &clock - ) - .is_err()); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &metas_with_lockup2, - &metas_with_lockup1, - &clock - ) - .is_err()); - - let clock = Clock { - epoch: lockup2_epoch + 1, - unix_timestamp: lockup2_timestamp + 1, - ..Clock::default() - }; - - // Mismatched lockups succeed when both expired - assert_ne!(metas_with_lockup1.lockup, Meta::default().lockup); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &metas_with_lockup1, - &metas_with_lockup2, - &clock - ) - .is_ok()); - assert!(MergeKind::metas_can_merge( - &invoke_context, - &metas_with_lockup2, - &metas_with_lockup1, - &clock - ) - .is_ok()); - } - - #[test] - fn test_merge_kind_get_if_mergeable() { - let transaction_accounts = vec![( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - )]; - with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); - let authority_pubkey = Pubkey::new_unique(); - let initial_lamports = 4242424242; - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_lamports = rent_exempt_reserve + initial_lamports; - let new_rate_activation_epoch = Some(0); - - let meta = Meta { - rent_exempt_reserve, - ..Meta::auto(&authority_pubkey) - }; - let mut stake_account = AccountSharedData::new_data_with_space( - stake_lamports, - &StakeStateV2::Uninitialized, - StakeStateV2::size_of(), - &id(), - ) - .expect("stake_account"); - let mut clock = Clock::default(); - let mut stake_history = StakeHistory::default(); - - // Uninitialized state fails - assert_eq!( - MergeKind::get_if_mergeable( - &invoke_context, - &stake_account.state().unwrap(), - 
stake_account.lamports(), - &clock, - &stake_history - ) - .unwrap_err(), - InstructionError::InvalidAccountData - ); - - // RewardsPool state fails - stake_account.set_state(&StakeStateV2::RewardsPool).unwrap(); - assert_eq!( - MergeKind::get_if_mergeable( - &invoke_context, - &stake_account.state().unwrap(), - stake_account.lamports(), - &clock, - &stake_history - ) - .unwrap_err(), - InstructionError::InvalidAccountData - ); - - // Initialized state succeeds - stake_account - .set_state(&StakeStateV2::Initialized(meta)) - .unwrap(); - assert_eq!( - MergeKind::get_if_mergeable( - &invoke_context, - &stake_account.state().unwrap(), - stake_account.lamports(), - &clock, - &stake_history - ) - .unwrap(), - MergeKind::Inactive(meta, stake_lamports, StakeFlags::empty()) - ); - - clock.epoch = 0; - let mut effective = 2 * initial_lamports; - let mut activating = 0; - let mut deactivating = 0; - stake_history.add( - clock.epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - - clock.epoch += 1; - activating = initial_lamports; - stake_history.add( - clock.epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - - let stake = Stake { - delegation: Delegation { - stake: initial_lamports, - activation_epoch: 1, - deactivation_epoch: 9, - ..Delegation::default() - }, - ..Stake::default() - }; - stake_account - .set_state(&StakeStateV2::Stake(meta, stake, StakeFlags::empty())) - .unwrap(); - // activation_epoch succeeds - assert_eq!( - MergeKind::get_if_mergeable( - &invoke_context, - &stake_account.state().unwrap(), - stake_account.lamports(), - &clock, - &stake_history - ) - .unwrap(), - MergeKind::ActivationEpoch(meta, stake, StakeFlags::empty()), - ); - - // all paritially activated, transient epochs fail - loop { - clock.epoch += 1; - let delta = activating.min( - (effective as f64 * warmup_cooldown_rate(clock.epoch, new_rate_activation_epoch)) - as u64, - ); - effective += delta; - activating -= delta; - 
stake_history.add( - clock.epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - if activating == 0 { - break; - } - assert_eq!( - MergeKind::get_if_mergeable( - &invoke_context, - &stake_account.state().unwrap(), - stake_account.lamports(), - &clock, - &stake_history - ) - .unwrap_err(), - InstructionError::from(StakeError::MergeTransientStake), - ); - } - - // all epochs for which we're fully active succeed - while clock.epoch < stake.delegation.deactivation_epoch - 1 { - clock.epoch += 1; - stake_history.add( - clock.epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - assert_eq!( - MergeKind::get_if_mergeable( - &invoke_context, - &stake_account.state().unwrap(), - stake_account.lamports(), - &clock, - &stake_history - ) - .unwrap(), - MergeKind::FullyActive(meta, stake), - ); - } - - clock.epoch += 1; - deactivating = stake.delegation.stake; - stake_history.add( - clock.epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - // deactivation epoch fails, fully transient/deactivating - assert_eq!( - MergeKind::get_if_mergeable( - &invoke_context, - &stake_account.state().unwrap(), - stake_account.lamports(), - &clock, - &stake_history - ) - .unwrap_err(), - InstructionError::from(StakeError::MergeTransientStake), - ); - - // all transient, deactivating epochs fail - loop { - clock.epoch += 1; - let delta = deactivating.min( - (effective as f64 * warmup_cooldown_rate(clock.epoch, new_rate_activation_epoch)) - as u64, - ); - effective -= delta; - deactivating -= delta; - stake_history.add( - clock.epoch, - StakeHistoryEntry { - effective, - activating, - deactivating, - }, - ); - if deactivating == 0 { - break; - } - assert_eq!( - MergeKind::get_if_mergeable( - &invoke_context, - &stake_account.state().unwrap(), - stake_account.lamports(), - &clock, - &stake_history - ) - .unwrap_err(), - InstructionError::from(StakeError::MergeTransientStake), - ); - } - - // first 
fully-deactivated epoch succeeds - assert_eq!( - MergeKind::get_if_mergeable( - &invoke_context, - &stake_account.state().unwrap(), - stake_account.lamports(), - &clock, - &stake_history - ) - .unwrap(), - MergeKind::Inactive(meta, stake_lamports, StakeFlags::empty()), - ); - } - - #[test] - fn test_merge_kind_merge() { - with_mock_invoke_context!(invoke_context, transaction_context, Vec::new()); - let clock = Clock::default(); - let lamports = 424242; - let meta = Meta { - rent_exempt_reserve: 42, - ..Meta::default() - }; - let stake = Stake { - delegation: Delegation { - stake: 4242, - ..Delegation::default() - }, - ..Stake::default() - }; - let inactive = MergeKind::Inactive(Meta::default(), lamports, StakeFlags::empty()); - let activation_epoch = MergeKind::ActivationEpoch(meta, stake, StakeFlags::empty()); - let fully_active = MergeKind::FullyActive(meta, stake); - - assert_eq!( - inactive - .clone() - .merge(&invoke_context, inactive.clone(), &clock) - .unwrap(), - None - ); - assert_eq!( - inactive - .clone() - .merge(&invoke_context, activation_epoch.clone(), &clock) - .unwrap(), - None - ); - assert!(inactive - .clone() - .merge(&invoke_context, fully_active.clone(), &clock) - .is_err()); - assert!(activation_epoch - .clone() - .merge(&invoke_context, fully_active.clone(), &clock) - .is_err()); - assert!(fully_active - .clone() - .merge(&invoke_context, inactive.clone(), &clock) - .is_err()); - assert!(fully_active - .clone() - .merge(&invoke_context, activation_epoch.clone(), &clock) - .is_err()); - - let new_state = activation_epoch - .clone() - .merge(&invoke_context, inactive, &clock) - .unwrap() - .unwrap(); - let delegation = new_state.delegation().unwrap(); - assert_eq!(delegation.stake, stake.delegation.stake + lamports); - - let new_state = activation_epoch - .clone() - .merge(&invoke_context, activation_epoch, &clock) - .unwrap() - .unwrap(); - let delegation = new_state.delegation().unwrap(); - assert_eq!( - delegation.stake, - 2 * 
stake.delegation.stake + meta.rent_exempt_reserve - ); - - let new_state = fully_active - .clone() - .merge(&invoke_context, fully_active, &clock) - .unwrap() - .unwrap(); - let delegation = new_state.delegation().unwrap(); - assert_eq!(delegation.stake, 2 * stake.delegation.stake); - } - - #[test] - fn test_active_stake_merge() { - let transaction_accounts = vec![( - Rent::id(), - create_account_shared_data_for_test(&Rent::default()), - )]; - with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); - let clock = Clock::default(); - let delegation_a = 4_242_424_242u64; - let delegation_b = 6_200_000_000u64; - let credits_a = 124_521_000u64; - let rent_exempt_reserve = 227_000_000u64; - let meta = Meta { - rent_exempt_reserve, - ..Meta::default() - }; - let stake_a = Stake { - delegation: Delegation { - stake: delegation_a, - ..Delegation::default() - }, - credits_observed: credits_a, - }; - let stake_b = Stake { - delegation: Delegation { - stake: delegation_b, - ..Delegation::default() - }, - credits_observed: credits_a, - }; - - // activating stake merge, match credits observed - let activation_epoch_a = MergeKind::ActivationEpoch(meta, stake_a, StakeFlags::empty()); - let activation_epoch_b = MergeKind::ActivationEpoch(meta, stake_b, StakeFlags::empty()); - let new_stake = activation_epoch_a - .merge(&invoke_context, activation_epoch_b, &clock) - .unwrap() - .unwrap() - .stake() - .unwrap(); - assert_eq!(new_stake.credits_observed, credits_a); - assert_eq!( - new_stake.delegation.stake, - delegation_a + delegation_b + rent_exempt_reserve - ); - - // active stake merge, match credits observed - let fully_active_a = MergeKind::FullyActive(meta, stake_a); - let fully_active_b = MergeKind::FullyActive(meta, stake_b); - let new_stake = fully_active_a - .merge(&invoke_context, fully_active_b, &clock) - .unwrap() - .unwrap() - .stake() - .unwrap(); - assert_eq!(new_stake.credits_observed, credits_a); - assert_eq!(new_stake.delegation.stake, 
delegation_a + delegation_b); - - // activating stake merge, unmatched credits observed - let credits_b = 125_124_521u64; - let stake_b = Stake { - delegation: Delegation { - stake: delegation_b, - ..Delegation::default() - }, - credits_observed: credits_b, - }; - let activation_epoch_a = MergeKind::ActivationEpoch(meta, stake_a, StakeFlags::empty()); - let activation_epoch_b = MergeKind::ActivationEpoch(meta, stake_b, StakeFlags::empty()); - let new_stake = activation_epoch_a - .merge(&invoke_context, activation_epoch_b, &clock) - .unwrap() - .unwrap() - .stake() - .unwrap(); - assert_eq!( - new_stake.credits_observed, - (credits_a * delegation_a + credits_b * (delegation_b + rent_exempt_reserve)) - / (delegation_a + delegation_b + rent_exempt_reserve) - + 1 - ); - assert_eq!( - new_stake.delegation.stake, - delegation_a + delegation_b + rent_exempt_reserve - ); - - // active stake merge, unmatched credits observed - let fully_active_a = MergeKind::FullyActive(meta, stake_a); - let fully_active_b = MergeKind::FullyActive(meta, stake_b); - let new_stake = fully_active_a - .merge(&invoke_context, fully_active_b, &clock) - .unwrap() - .unwrap() - .stake() - .unwrap(); - assert_eq!( - new_stake.credits_observed, - (credits_a * delegation_a + credits_b * delegation_b) / (delegation_a + delegation_b) - + 1 - ); - assert_eq!(new_stake.delegation.stake, delegation_a + delegation_b); - - // active stake merge, unmatched credits observed, no need to ceiling the calculation - let delegation = 1_000_000u64; - let credits_a = 200_000_000u64; - let credits_b = 100_000_000u64; - let rent_exempt_reserve = 227_000_000u64; - let meta = Meta { - rent_exempt_reserve, - ..Meta::default() - }; - let stake_a = Stake { - delegation: Delegation { - stake: delegation, - ..Delegation::default() - }, - credits_observed: credits_a, - }; - let stake_b = Stake { - delegation: Delegation { - stake: delegation, - ..Delegation::default() - }, - credits_observed: credits_b, - }; - let 
fully_active_a = MergeKind::FullyActive(meta, stake_a); - let fully_active_b = MergeKind::FullyActive(meta, stake_b); - let new_stake = fully_active_a - .merge(&invoke_context, fully_active_b, &clock) - .unwrap() - .unwrap() - .stake() - .unwrap(); - assert_eq!( - new_stake.credits_observed, - (credits_a * delegation + credits_b * delegation) / (delegation + delegation) - ); - assert_eq!(new_stake.delegation.stake, delegation * 2); - } - - prop_compose! { - pub fn sum_within(max: u64)(total in 1..max) - (intermediate in 1..total, total in Just(total)) - -> (u64, u64) { - (intermediate, total - intermediate) - } - } - - proptest! { - #[test] - fn test_stake_weighted_credits_observed( - (credits_a, credits_b) in sum_within(u64::MAX), - (delegation_a, delegation_b) in sum_within(u64::MAX), - ) { - let stake = Stake { - delegation: Delegation { - stake: delegation_a, - ..Delegation::default() - }, - credits_observed: credits_a - }; - let credits_observed = stake_weighted_credits_observed( - &stake, - delegation_b, - credits_b, - ).unwrap(); - - // calculated credits observed should always be between the credits of a and b - if credits_a < credits_b { - assert!(credits_a < credits_observed); - assert!(credits_observed <= credits_b); - } else { - assert!(credits_b <= credits_observed); - assert!(credits_observed <= credits_a); - } - - // the difference of the combined weighted credits and the separate weighted credits - // should be 1 or 0 - let weighted_credits_total = credits_observed as u128 * (delegation_a + delegation_b) as u128; - let weighted_credits_a = credits_a as u128 * delegation_a as u128; - let weighted_credits_b = credits_b as u128 * delegation_b as u128; - let raw_diff = weighted_credits_total - (weighted_credits_a + weighted_credits_b); - let credits_observed_diff = raw_diff / (delegation_a + delegation_b) as u128; - assert!(credits_observed_diff <= 1); - } - } -} diff --git a/programs/system/Cargo.toml b/programs/system/Cargo.toml index 
efdbb17443c831..8526437ba83102 100644 --- a/programs/system/Cargo.toml +++ b/programs/system/Cargo.toml @@ -25,17 +25,17 @@ solana-account = { workspace = true } solana-bincode = { workspace = true } solana-fee-calculator = { workspace = true } solana-instruction = { workspace = true } -solana-log-collector = { workspace = true } solana-nonce = { workspace = true, features = ["serde"] } solana-nonce-account = { workspace = true } solana-packet = { workspace = true } solana-program-runtime = { workspace = true } solana-pubkey = { workspace = true, features = ["sha2"] } solana-sdk-ids = { workspace = true } +solana-svm-log-collector = { workspace = true } +solana-svm-type-overrides = { workspace = true } solana-system-interface = { workspace = true, features = ["serde"] } solana-sysvar = { workspace = true } solana-transaction-context = { workspace = true, features = ["bincode"] } -solana-type-overrides = { workspace = true } [dev-dependencies] agave-feature-set = { workspace = true } diff --git a/programs/system/benches/system.rs b/programs/system/benches/system.rs index 3d952f069f8d90..8a8cd593072be7 100644 --- a/programs/system/benches/system.rs +++ b/programs/system/benches/system.rs @@ -460,7 +460,7 @@ impl TestSetup { fn run(&self) { mock_process_instruction( &solana_system_program::id(), - Vec::new(), + None, &self.instruction_data, self.transaction_accounts.clone(), self.instruction_accounts.clone(), diff --git a/programs/system/src/system_instruction.rs b/programs/system/src/system_instruction.rs index fc2fea06cb4e27..f1c1f973403e23 100644 --- a/programs/system/src/system_instruction.rs +++ b/programs/system/src/system_instruction.rs @@ -1,6 +1,5 @@ use { solana_instruction::error::InstructionError, - solana_log_collector::ic_msg, solana_nonce::{ self as nonce, state::{DurableNonce, State}, @@ -8,11 +7,10 @@ use { }, solana_program_runtime::invoke_context::InvokeContext, solana_pubkey::Pubkey, + solana_svm_log_collector::ic_msg, 
solana_system_interface::error::SystemError, solana_sysvar::rent::Rent, - solana_transaction_context::{ - BorrowedAccount, IndexOfAccount, InstructionContext, TransactionContext, - }, + solana_transaction_context::{BorrowedInstructionAccount, IndexOfAccount, InstructionContext}, std::collections::HashSet, }; @@ -22,7 +20,7 @@ fn checked_add(a: u64, b: u64) -> Result { } pub fn advance_nonce_account( - account: &mut BorrowedAccount, + account: &mut BorrowedInstructionAccount, signers: &HashSet, invoke_context: &InvokeContext, ) -> Result<(), InstructionError> { @@ -83,11 +81,9 @@ pub(crate) fn withdraw_nonce_account( rent: &Rent, signers: &HashSet, invoke_context: &InvokeContext, - transaction_context: &TransactionContext, instruction_context: &InstructionContext, ) -> Result<(), InstructionError> { - let mut from = instruction_context - .try_borrow_instruction_account(transaction_context, from_account_index)?; + let mut from = instruction_context.try_borrow_instruction_account(from_account_index)?; if !from.is_writable() { ic_msg!( invoke_context, @@ -151,15 +147,14 @@ pub(crate) fn withdraw_nonce_account( from.checked_sub_lamports(lamports)?; drop(from); - let mut to = instruction_context - .try_borrow_instruction_account(transaction_context, to_account_index)?; + let mut to = instruction_context.try_borrow_instruction_account(to_account_index)?; to.checked_add_lamports(lamports)?; Ok(()) } pub(crate) fn initialize_nonce_account( - account: &mut BorrowedAccount, + account: &mut BorrowedInstructionAccount, nonce_authority: &Pubkey, rent: &Rent, invoke_context: &InvokeContext, @@ -209,7 +204,7 @@ pub(crate) fn initialize_nonce_account( } pub(crate) fn authorize_nonce_account( - account: &mut BorrowedAccount, + account: &mut BorrowedInstructionAccount, nonce_authority: &Pubkey, signers: &HashSet, invoke_context: &InvokeContext, @@ -267,9 +262,8 @@ mod test { ($invoke_context:expr, $transaction_context:ident, $instruction_context:ident, $instruction_accounts:ident) => 
{ $invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![2], $instruction_accounts, &[]); + .configure_next_instruction_for_tests(2, $instruction_accounts, &[]) + .unwrap(); $invoke_context.push().unwrap(); let $transaction_context = &$invoke_context.transaction_context; let $instruction_context = $transaction_context @@ -294,8 +288,8 @@ mod test { (system_program::id(), AccountSharedData::default()), ]; let $instruction_accounts = vec![ - InstructionAccount::new(0, 0, true, true), - InstructionAccount::new(1, 1, false, true), + InstructionAccount::new(0, true, true), + InstructionAccount::new(1, false, true), ]; with_mock_invoke_context!($invoke_context, transaction_context, transaction_accounts); }; @@ -326,7 +320,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let data = nonce::state::Data { authority: *nonce_account.get_key(), @@ -377,7 +371,7 @@ mod test { set_invoke_context_blockhash!(invoke_context, 0); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let withdraw_lamports = nonce_account.get_lamports(); let expect_nonce_lamports = nonce_account.get_lamports() - withdraw_lamports; @@ -391,15 +385,14 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) .unwrap(); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); 
// Empties Account balance assert_eq!(nonce_account.get_lamports(), expect_nonce_lamports); @@ -420,7 +413,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); set_invoke_context_blockhash!(invoke_context, 31); let authority = *nonce_account.get_key(); @@ -451,7 +444,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_account.get_key()); @@ -472,7 +465,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_account.get_key()); @@ -491,10 +484,10 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let nonce_authority = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX + 1) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX + 1) .unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_account.get_key()); @@ -518,10 +511,10 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let nonce_authority = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX + 1) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX + 1) 
.unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_account.get_key()); @@ -542,10 +535,10 @@ mod test { instruction_accounts ); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let versions = nonce_account.get_state::().unwrap(); assert_eq!(versions.state(), &State::Uninitialized); @@ -564,15 +557,14 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) .unwrap(); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let versions = nonce_account.get_state::().unwrap(); assert_eq!(versions.state(), &State::Uninitialized); @@ -590,10 +582,10 @@ mod test { instruction_accounts ); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let versions = nonce_account.get_state::().unwrap(); assert_eq!(versions.state(), &State::Uninitialized); @@ -609,8 +601,7 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ); assert_eq!(result, Err(InstructionError::MissingRequiredSignature)); } @@ 
-625,7 +616,7 @@ mod test { instruction_accounts ); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let versions = nonce_account.get_state::().unwrap(); assert_eq!(versions.state(), &State::Uninitialized); @@ -641,8 +632,7 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ); assert_eq!(result, Err(InstructionError::InsufficientFunds)); } @@ -657,10 +647,10 @@ mod test { instruction_accounts ); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_account.get_key()); @@ -677,15 +667,14 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) .unwrap(); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let versions = nonce_account.get_state::().unwrap(); assert_eq!(versions.state(), &State::Uninitialized); @@ -703,15 +692,14 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) .unwrap(); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) 
.unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let versions = nonce_account.get_state::().unwrap(); assert_eq!(versions.state(), &State::Uninitialized); @@ -729,10 +717,10 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_account.get_key()); @@ -760,15 +748,14 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) .unwrap(); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let versions = nonce_account.get_state::().unwrap(); let data = nonce::state::Data::new( @@ -794,15 +781,14 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) .unwrap(); let nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); let versions = 
nonce_account.get_state::().unwrap(); assert_eq!(versions.state(), &State::Uninitialized); @@ -820,10 +806,10 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let to_account = instruction_context - .try_borrow_instruction_account(transaction_context, WITHDRAW_TO_ACCOUNT_INDEX) + .try_borrow_instruction_account(WITHDRAW_TO_ACCOUNT_INDEX) .unwrap(); set_invoke_context_blockhash!(invoke_context, 0); let authorized = *nonce_account.get_key(); @@ -840,8 +826,7 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ); assert_eq!(result, Err(SystemError::NonceBlockhashNotExpired.into())); } @@ -856,7 +841,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); set_invoke_context_blockhash!(invoke_context, 95); let authorized = *nonce_account.get_key(); @@ -873,8 +858,7 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ); assert_eq!(result, Err(InstructionError::InsufficientFunds)); } @@ -889,7 +873,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); set_invoke_context_blockhash!(invoke_context, 95); let authorized = *nonce_account.get_key(); @@ -906,8 +890,7 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ); assert_eq!(result, Err(InstructionError::InsufficientFunds)); } @@ -922,7 +905,7 @@ mod test { instruction_accounts ); let mut nonce_account = 
instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); set_invoke_context_blockhash!(invoke_context, 95); let authorized = *nonce_account.get_key(); @@ -939,8 +922,7 @@ mod test { &rent, &signers, &invoke_context, - transaction_context, - instruction_context, + &instruction_context, ); assert_eq!(result, Err(InstructionError::InsufficientFunds)); } @@ -955,7 +937,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let versions = nonce_account.get_state::().unwrap(); assert_eq!(versions.state(), &State::Uninitialized); @@ -987,7 +969,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); set_invoke_context_blockhash!(invoke_context, 31); let authorized = *nonce_account.get_key(); @@ -1008,7 +990,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); nonce_account.checked_sub_lamports(42 * 2).unwrap(); set_invoke_context_blockhash!(invoke_context, 63); @@ -1028,7 +1010,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_account.get_key()); @@ -1058,7 +1040,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + 
.try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_account.get_key()); @@ -1081,7 +1063,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let mut signers = HashSet::new(); signers.insert(*nonce_account.get_key()); @@ -1103,7 +1085,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let mut signers = HashSet::new(); signers.insert(nonce_account.get_key()); @@ -1158,7 +1140,7 @@ mod test { instruction_accounts ); let mut nonce_account = instruction_context - .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) + .try_borrow_instruction_account(NONCE_ACCOUNT_INDEX) .unwrap(); let mut signers = HashSet::new(); signers.insert(nonce_account.get_key()); diff --git a/programs/system/src/system_processor.rs b/programs/system/src/system_processor.rs index 8d968055da625d..ce65334ffe7ad6 100644 --- a/programs/system/src/system_processor.rs +++ b/programs/system/src/system_processor.rs @@ -6,7 +6,6 @@ use { log::*, solana_bincode::limited_deserialize, solana_instruction::error::InstructionError, - solana_log_collector::ic_msg, solana_nonce as nonce, solana_program_runtime::{ declare_process_instruction, invoke_context::InvokeContext, @@ -14,12 +13,11 @@ use { }, solana_pubkey::Pubkey, solana_sdk_ids::system_program, + solana_svm_log_collector::ic_msg, solana_system_interface::{ error::SystemError, instruction::SystemInstruction, MAX_PERMITTED_DATA_LENGTH, }, - solana_transaction_context::{ - BorrowedAccount, IndexOfAccount, InstructionContext, TransactionContext, - }, + solana_transaction_context::{BorrowedInstructionAccount, IndexOfAccount, 
InstructionContext}, std::collections::HashSet, }; @@ -45,7 +43,10 @@ impl Address { invoke_context: &InvokeContext, ) -> Result { let base = if let Some((base, seed, owner)) = with_seed { - let address_with_seed = Pubkey::create_with_seed(base, seed, owner)?; + // The conversion from `PubkeyError` to `InstructionError` through + // num-traits is incorrect, but it's the existing behavior. + let address_with_seed = + Pubkey::create_with_seed(base, seed, owner).map_err(|e| e as u64)?; // re-derive the address, must match the supplied address if *address != address_with_seed { ic_msg!( @@ -69,7 +70,7 @@ impl Address { } fn allocate( - account: &mut BorrowedAccount, + account: &mut BorrowedInstructionAccount, address: &Address, space: u64, signers: &HashSet, @@ -111,7 +112,7 @@ fn allocate( } fn assign( - account: &mut BorrowedAccount, + account: &mut BorrowedInstructionAccount, address: &Address, owner: &Pubkey, signers: &HashSet, @@ -131,7 +132,7 @@ fn assign( } fn allocate_and_assign( - to: &mut BorrowedAccount, + to: &mut BorrowedInstructionAccount, to_address: &Address, space: u64, owner: &Pubkey, @@ -152,13 +153,11 @@ fn create_account( owner: &Pubkey, signers: &HashSet, invoke_context: &InvokeContext, - transaction_context: &TransactionContext, instruction_context: &InstructionContext, ) -> Result<(), InstructionError> { // if it looks like the `to` account is already in use, bail { - let mut to = instruction_context - .try_borrow_instruction_account(transaction_context, to_account_index)?; + let mut to = instruction_context.try_borrow_instruction_account(to_account_index)?; if to.get_lamports() > 0 { ic_msg!( invoke_context, @@ -175,7 +174,6 @@ fn create_account( to_account_index, lamports, invoke_context, - transaction_context, instruction_context, ) } @@ -185,11 +183,9 @@ fn transfer_verified( to_account_index: IndexOfAccount, lamports: u64, invoke_context: &InvokeContext, - transaction_context: &TransactionContext, instruction_context: &InstructionContext, ) 
-> Result<(), InstructionError> { - let mut from = instruction_context - .try_borrow_instruction_account(transaction_context, from_account_index)?; + let mut from = instruction_context.try_borrow_instruction_account(from_account_index)?; if !from.get_data().is_empty() { ic_msg!(invoke_context, "Transfer: `from` must not carry data"); return Err(InstructionError::InvalidArgument); @@ -206,8 +202,7 @@ fn transfer_verified( from.checked_sub_lamports(lamports)?; drop(from); - let mut to = instruction_context - .try_borrow_instruction_account(transaction_context, to_account_index)?; + let mut to = instruction_context.try_borrow_instruction_account(to_account_index)?; to.checked_add_lamports(lamports)?; Ok(()) } @@ -217,17 +212,13 @@ fn transfer( to_account_index: IndexOfAccount, lamports: u64, invoke_context: &InvokeContext, - transaction_context: &TransactionContext, instruction_context: &InstructionContext, ) -> Result<(), InstructionError> { if !instruction_context.is_instruction_account_signer(from_account_index)? { ic_msg!( invoke_context, "Transfer: `from` account {} must sign", - transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(from_account_index)?, - )?, + instruction_context.get_key_of_instruction_account(from_account_index)?, ); return Err(InstructionError::MissingRequiredSignature); } @@ -237,7 +228,6 @@ fn transfer( to_account_index, lamports, invoke_context, - transaction_context, instruction_context, ) } @@ -250,32 +240,26 @@ fn transfer_with_seed( to_account_index: IndexOfAccount, lamports: u64, invoke_context: &InvokeContext, - transaction_context: &TransactionContext, instruction_context: &InstructionContext, ) -> Result<(), InstructionError> { if !instruction_context.is_instruction_account_signer(from_base_account_index)? 
{ ic_msg!( invoke_context, "Transfer: 'from' account {:?} must sign", - transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(from_base_account_index)?, - )?, + instruction_context.get_key_of_instruction_account(from_base_account_index,)?, ); return Err(InstructionError::MissingRequiredSignature); } + // The conversion from `PubkeyError` to `InstructionError` through + // num-traits is incorrect, but it's the existing behavior. let address_from_seed = Pubkey::create_with_seed( - transaction_context.get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction(from_base_account_index)?, - )?, + instruction_context.get_key_of_instruction_account(from_base_account_index)?, from_seed, from_owner, - )?; + ) + .map_err(|e| e as u64)?; - let from_key = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(from_account_index)?, - )?; + let from_key = instruction_context.get_key_of_instruction_account(from_account_index)?; if *from_key != address_from_seed { ic_msg!( invoke_context, @@ -291,7 +275,6 @@ fn transfer_with_seed( to_account_index, lamports, invoke_context, - transaction_context, instruction_context, ) } @@ -307,7 +290,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| trace!("process_instruction: {instruction:?}"); - let signers = instruction_context.get_signers(transaction_context)?; + let signers = instruction_context.get_signers()?; match instruction { SystemInstruction::CreateAccount { lamports, @@ -316,9 +299,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } => { instruction_context.check_number_of_instruction_accounts(2)?; let to_address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?, + 
instruction_context.get_key_of_instruction_account(1)?, None, invoke_context, )?; @@ -331,8 +312,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| &owner, &signers, invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) } SystemInstruction::CreateAccountWithSeed { @@ -344,9 +324,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } => { instruction_context.check_number_of_instruction_accounts(2)?; let to_address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?, + instruction_context.get_key_of_instruction_account(1)?, Some((&base, &seed, &owner)), invoke_context, )?; @@ -359,18 +337,14 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| &owner, &signers, invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) } SystemInstruction::Assign { owner } => { instruction_context.check_number_of_instruction_accounts(1)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut account = instruction_context.try_borrow_instruction_account(0)?; let address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?, + instruction_context.get_key_of_instruction_account(0)?, None, invoke_context, )?; @@ -378,14 +352,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } SystemInstruction::Transfer { lamports } => { instruction_context.check_number_of_instruction_accounts(2)?; - transfer( - 0, - 1, - lamports, - invoke_context, - transaction_context, - instruction_context, - ) + transfer(0, 1, lamports, invoke_context, &instruction_context) } SystemInstruction::TransferWithSeed { lamports, @@ -401,18 +368,16 @@ 
declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| 2, lamports, invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) } SystemInstruction::AdvanceNonceAccount => { instruction_context.check_number_of_instruction_accounts(1)?; - let mut me = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut me = instruction_context.try_borrow_instruction_account(0)?; #[allow(deprecated)] let recent_blockhashes = get_sysvar_with_account_check::recent_blockhashes( invoke_context, - instruction_context, + &instruction_context, 1, )?; if recent_blockhashes.is_empty() { @@ -429,10 +394,11 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| #[allow(deprecated)] let _recent_blockhashes = get_sysvar_with_account_check::recent_blockhashes( invoke_context, - instruction_context, + &instruction_context, 2, )?; - let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 3)?; + let rent = + get_sysvar_with_account_check::rent(invoke_context, &instruction_context, 3)?; withdraw_nonce_account( 0, lamports, @@ -440,18 +406,16 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| &rent, &signers, invoke_context, - transaction_context, - instruction_context, + &instruction_context, ) } SystemInstruction::InitializeNonceAccount(authorized) => { instruction_context.check_number_of_instruction_accounts(1)?; - let mut me = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut me = instruction_context.try_borrow_instruction_account(0)?; #[allow(deprecated)] let recent_blockhashes = get_sysvar_with_account_check::recent_blockhashes( invoke_context, - instruction_context, + &instruction_context, 1, )?; if recent_blockhashes.is_empty() { @@ -461,19 +425,18 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| ); return 
Err(SystemError::NonceNoRecentBlockhashes.into()); } - let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 2)?; + let rent = + get_sysvar_with_account_check::rent(invoke_context, &instruction_context, 2)?; initialize_nonce_account(&mut me, &authorized, &rent, invoke_context) } SystemInstruction::AuthorizeNonceAccount(nonce_authority) => { instruction_context.check_number_of_instruction_accounts(1)?; - let mut me = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut me = instruction_context.try_borrow_instruction_account(0)?; authorize_nonce_account(&mut me, &nonce_authority, &signers, invoke_context) } SystemInstruction::UpgradeNonceAccount => { instruction_context.check_number_of_instruction_accounts(1)?; - let mut nonce_account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut nonce_account = instruction_context.try_borrow_instruction_account(0)?; if !system_program::check_id(nonce_account.get_owner()) { return Err(InstructionError::InvalidAccountOwner); } @@ -488,12 +451,9 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } SystemInstruction::Allocate { space } => { instruction_context.check_number_of_instruction_accounts(1)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut account = instruction_context.try_borrow_instruction_account(0)?; let address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?, + instruction_context.get_key_of_instruction_account(0)?, None, invoke_context, )?; @@ -506,12 +466,9 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| owner, } => { instruction_context.check_number_of_instruction_accounts(1)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let 
mut account = instruction_context.try_borrow_instruction_account(0)?; let address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?, + instruction_context.get_key_of_instruction_account(0)?, Some((&base, &seed, &owner)), invoke_context, )?; @@ -526,12 +483,9 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } SystemInstruction::AssignWithSeed { base, seed, owner } => { instruction_context.check_number_of_instruction_accounts(1)?; - let mut account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut account = instruction_context.try_borrow_instruction_account(0)?; let address = Address::create( - transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?, + instruction_context.get_key_of_instruction_account(0)?, Some((&base, &seed, &owner)), invoke_context, )?; @@ -592,7 +546,7 @@ mod tests { ) -> Vec { mock_process_instruction( &system_program::id(), - Vec::new(), + None, instruction_data, transaction_accounts, instruction_accounts, @@ -1280,7 +1234,7 @@ mod tests { &data, Vec::new(), Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); // Attempt to transfer with no destination @@ -1296,7 +1250,7 @@ mod tests { is_signer: true, is_writable: false, }], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); } @@ -1530,7 +1484,7 @@ mod tests { &serialize(&SystemInstruction::AdvanceNonceAccount).unwrap(), Vec::new(), Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); } @@ -1545,7 +1499,7 @@ mod tests { is_signer: true, is_writable: true, }], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); } @@ -1589,7 +1543,7 @@ mod tests { ]); 
mock_process_instruction( &system_program::id(), - Vec::new(), + None, &serialize(&SystemInstruction::AdvanceNonceAccount).unwrap(), vec![ (nonce_address, accounts[0].clone()), @@ -1636,7 +1590,7 @@ mod tests { &serialize(&SystemInstruction::WithdrawNonceAccount(42)).unwrap(), Vec::new(), Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); } @@ -1651,7 +1605,7 @@ mod tests { is_signer: true, is_writable: true, }], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); } @@ -1702,7 +1656,7 @@ mod tests { &serialize(&SystemInstruction::InitializeNonceAccount(Pubkey::default())).unwrap(), Vec::new(), Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); } @@ -1718,7 +1672,7 @@ mod tests { is_signer: true, is_writable: true, }], - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); } @@ -1885,7 +1839,7 @@ mod tests { let new_recent_blockhashes_account = create_recent_blockhashes_account_for_test(vec![]); mock_process_instruction( &system_program::id(), - Vec::new(), + None, &serialize(&SystemInstruction::AdvanceNonceAccount).unwrap(), vec![ (nonce_address, accounts[0].clone()), diff --git a/programs/vote/benches/process_vote.rs b/programs/vote/benches/process_vote.rs index afdf6b4ac62dc6..aeb272f9335a61 100644 --- a/programs/vote/benches/process_vote.rs +++ b/programs/vote/benches/process_vote.rs @@ -14,11 +14,11 @@ use { solana_pubkey::Pubkey, solana_sdk_ids::sysvar, solana_slot_hashes::{SlotHashes, MAX_ENTRIES}, - solana_transaction_context::TransactionAccount, + solana_transaction_context::transaction_accounts::TransactionAccount, solana_vote_program::{ vote_instruction::VoteInstruction, vote_state::{ - TowerSync, Vote, VoteInit, VoteState, VoteStateUpdate, VoteStateVersions, + TowerSync, Vote, VoteInit, VoteStateUpdate, VoteStateV3, VoteStateVersions, MAX_LOCKOUT_HISTORY, }, }, @@ -39,7 +39,7 @@ 
fn create_accounts() -> (Slot, SlotHashes, Vec, Vec (Slot, SlotHashes, Vec, Vec = vec![0; VoteState::size_of()]; - let versioned = VoteStateVersions::new_current(vote_state); - VoteState::serialize(&versioned, &mut vote_account_data).unwrap(); + let mut vote_account_data: Vec = vec![0; VoteStateV3::size_of()]; + let versioned = VoteStateVersions::new_v3(vote_state); + VoteStateV3::serialize(&versioned, &mut vote_account_data).unwrap(); Account { lamports: 1, @@ -107,7 +107,7 @@ fn bench_process_deprecated_vote_instruction( bencher.iter(|| { mock_process_instruction_with_feature_set( &solana_vote_program::id(), - Vec::new(), + None, &instruction_data, transaction_accounts.clone(), instruction_account_metas.clone(), @@ -129,7 +129,7 @@ fn bench_process_vote_instruction( bencher.iter(|| { mock_process_instruction( &solana_vote_program::id(), - Vec::new(), + None, &instruction_data, transaction_accounts.clone(), instruction_account_metas.clone(), diff --git a/programs/vote/benches/vote_instructions.rs b/programs/vote/benches/vote_instructions.rs index 92741648e8bef8..bb12ed10cb8e10 100644 --- a/programs/vote/benches/vote_instructions.rs +++ b/programs/vote/benches/vote_instructions.rs @@ -14,14 +14,14 @@ use { solana_rent::Rent, solana_sdk_ids::{sysvar, vote::id}, solana_slot_hashes::{SlotHashes, MAX_ENTRIES}, - solana_transaction_context::TransactionAccount, + solana_transaction_context::transaction_accounts::TransactionAccount, solana_vote_program::{ vote_instruction::VoteInstruction, vote_processor::Entrypoint, vote_state::{ create_account, create_account_with_authorized, TowerSync, Vote, VoteAuthorize, - VoteAuthorizeCheckedWithSeedArgs, VoteAuthorizeWithSeedArgs, VoteInit, VoteState, - VoteStateUpdate, VoteStateVersions, MAX_LOCKOUT_HISTORY, + VoteAuthorizeCheckedWithSeedArgs, VoteAuthorizeWithSeedArgs, VoteInit, VoteStateUpdate, + VoteStateV3, VoteStateVersions, MAX_LOCKOUT_HISTORY, }, }, }; @@ -48,7 +48,7 @@ fn create_accounts() -> (Slot, SlotHashes, Vec, Vec 
(Slot, SlotHashes, Vec, Vec = vec![0; VoteState::size_of()]; - let versioned = VoteStateVersions::new_current(vote_state); - VoteState::serialize(&versioned, &mut vote_account_data).unwrap(); + let mut vote_account_data: Vec = vec![0; VoteStateV3::size_of()]; + let versioned = VoteStateVersions::new_v3(vote_state); + VoteStateV3::serialize(&versioned, &mut vote_account_data).unwrap(); Account { lamports: 1, @@ -124,7 +124,7 @@ fn create_accounts() -> (Slot, SlotHashes, Vec, Vec (Pubkey, AccountSharedData) { let rent = Rent::default(); - let balance = VoteState::get_rent_exempt_reserve(&rent); + let balance = rent.minimum_balance(VoteStateV3::size_of()); let vote_pubkey = solana_pubkey::new_rand(); ( vote_pubkey, @@ -159,7 +159,7 @@ fn process_instruction( ) -> Vec { mock_process_instruction( &id(), - Vec::new(), + None, instruction_data, transaction_accounts, instruction_accounts, @@ -180,7 +180,7 @@ fn process_deprecated_instruction( deprecated_feature_set.deactivate(&deprecate_legacy_vote_ixs::id()); mock_process_instruction_with_feature_set( &id(), - Vec::new(), + None, instruction_data, transaction_accounts, instruction_accounts, @@ -256,7 +256,7 @@ struct BenchInitializeAccount { impl BenchInitializeAccount { fn new() -> Self { let vote_pubkey = solana_pubkey::new_rand(); - let vote_account = AccountSharedData::new(100, VoteState::size_of(), &id()); + let vote_account = AccountSharedData::new(100, VoteStateV3::size_of(), &id()); let node_pubkey = solana_pubkey::new_rand(); let node_account = AccountSharedData::default(); let instruction_data = serialize(&VoteInstruction::InitializeAccount(VoteInit { @@ -574,7 +574,7 @@ impl BenchAuthorizeChecked { fn new() -> Self { let vote_pubkey = Pubkey::new_unique(); let new_authorized_pubkey = Pubkey::new_unique(); - let vote_account = AccountSharedData::new(100, VoteState::size_of(), &id()); + let vote_account = AccountSharedData::new(100, VoteStateV3::size_of(), &id()); let clock_address = sysvar::clock::id(); let 
clock_account = account::create_account_shared_data_for_test(&Clock::default()); let default_authorized_pubkey = Pubkey::default(); diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index 823c4c39932324..e852ebdedcb741 100644 --- a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -10,7 +10,7 @@ use { sysvar_cache::get_sysvar_with_account_check, }, solana_pubkey::Pubkey, - solana_transaction_context::{BorrowedAccount, InstructionContext, TransactionContext}, + solana_transaction_context::{BorrowedInstructionAccount, InstructionContext}, solana_vote_interface::{instruction::VoteInstruction, program::id, state::VoteAuthorize}, std::collections::HashSet, }; @@ -18,8 +18,7 @@ use { fn process_authorize_with_seed_instruction( invoke_context: &InvokeContext, instruction_context: &InstructionContext, - transaction_context: &TransactionContext, - vote_account: &mut BorrowedAccount, + vote_account: &mut BorrowedInstructionAccount, new_authority: &Pubkey, authorization_type: VoteAuthorize, current_authority_derived_key_owner: &Pubkey, @@ -28,14 +27,17 @@ fn process_authorize_with_seed_instruction( let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; let mut expected_authority_keys: HashSet = HashSet::default(); if instruction_context.is_instruction_account_signer(2)? { - let base_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(2)?, - )?; - expected_authority_keys.insert(Pubkey::create_with_seed( - base_pubkey, - current_authority_derived_key_seed, - current_authority_derived_key_owner, - )?); + let base_pubkey = instruction_context.get_key_of_instruction_account(2)?; + // The conversion from `PubkeyError` to `InstructionError` through + // num-traits is incorrect, but it's the existing behavior. 
+ expected_authority_keys.insert( + Pubkey::create_with_seed( + base_pubkey, + current_authority_derived_key_seed, + current_authority_derived_key_owner, + ) + .map_err(|e| e as u64)?, + ); }; vote_state::authorize( vote_account, @@ -57,33 +59,33 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| trace!("process_instruction: {data:?}"); - let mut me = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut me = instruction_context.try_borrow_instruction_account(0)?; if *me.get_owner() != id() { return Err(InstructionError::InvalidAccountOwner); } - let signers = instruction_context.get_signers(transaction_context)?; + let signers = instruction_context.get_signers()?; match limited_deserialize(data, solana_packet::PACKET_DATA_SIZE as u64)? { VoteInstruction::InitializeAccount(vote_init) => { - let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; + let rent = + get_sysvar_with_account_check::rent(invoke_context, &instruction_context, 1)?; if !rent.is_exempt(me.get_lamports(), me.get_data().len()) { return Err(InstructionError::InsufficientFunds); } let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; + get_sysvar_with_account_check::clock(invoke_context, &instruction_context, 2)?; vote_state::initialize_account(&mut me, &vote_init, &signers, &clock) } VoteInstruction::Authorize(voter_pubkey, vote_authorize) => { let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + get_sysvar_with_account_check::clock(invoke_context, &instruction_context, 1)?; vote_state::authorize(&mut me, &voter_pubkey, vote_authorize, &signers, &clock) } VoteInstruction::AuthorizeWithSeed(args) => { instruction_context.check_number_of_instruction_accounts(3)?; process_authorize_with_seed_instruction( invoke_context, - instruction_context, - transaction_context, + &instruction_context, &mut me, &args.new_authority, 
args.authorization_type, @@ -93,16 +95,13 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } VoteInstruction::AuthorizeCheckedWithSeed(args) => { instruction_context.check_number_of_instruction_accounts(4)?; - let new_authority = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; + let new_authority = instruction_context.get_key_of_instruction_account(3)?; if !instruction_context.is_instruction_account_signer(3)? { return Err(InstructionError::MissingRequiredSignature); } process_authorize_with_seed_instruction( invoke_context, - instruction_context, - transaction_context, + &instruction_context, &mut me, new_authority, args.authorization_type, @@ -112,9 +111,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } VoteInstruction::UpdateValidatorIdentity => { instruction_context.check_number_of_instruction_accounts(2)?; - let node_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?; + let node_pubkey = instruction_context.get_key_of_instruction_account(1)?; vote_state::update_validator_identity(&mut me, node_pubkey, &signers) } VoteInstruction::UpdateCommission(commission) => { @@ -132,10 +129,13 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| if invoke_context.is_deprecate_legacy_vote_ixs_active() { return Err(InstructionError::InvalidInstructionData); } - let slot_hashes = - get_sysvar_with_account_check::slot_hashes(invoke_context, instruction_context, 1)?; + let slot_hashes = get_sysvar_with_account_check::slot_hashes( + invoke_context, + &instruction_context, + 1, + )?; let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; + get_sysvar_with_account_check::clock(invoke_context, &instruction_context, 2)?; vote_state::process_vote_with_account(&mut me, 
&slot_hashes, &clock, &vote, &signers) } VoteInstruction::UpdateVoteState(vote_state_update) @@ -190,8 +190,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| drop(me); vote_state::withdraw( - transaction_context, - instruction_context, + &instruction_context, 0, lamports, 1, @@ -202,14 +201,12 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| } VoteInstruction::AuthorizeChecked(vote_authorize) => { instruction_context.check_number_of_instruction_accounts(4)?; - let voter_pubkey = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(3)?, - )?; + let voter_pubkey = instruction_context.get_key_of_instruction_account(3)?; if !instruction_context.is_instruction_account_signer(3)? { return Err(InstructionError::MissingRequiredSignature); } let clock = - get_sysvar_with_account_check::clock(invoke_context, instruction_context, 1)?; + get_sysvar_with_account_check::clock(invoke_context, &instruction_context, 1)?; vote_state::authorize(&mut me, voter_pubkey, vote_authorize, &signers, &clock) } } @@ -229,7 +226,8 @@ mod tests { }, vote_state::{ self, Lockout, TowerSync, Vote, VoteAuthorize, VoteAuthorizeCheckedWithSeedArgs, - VoteAuthorizeWithSeedArgs, VoteInit, VoteState, VoteStateUpdate, VoteStateVersions, + VoteAuthorizeWithSeedArgs, VoteInit, VoteStateUpdate, VoteStateV3, + VoteStateVersions, }, }, bincode::serialize, @@ -272,7 +270,7 @@ mod tests { ) -> Vec { mock_process_instruction( &id(), - Vec::new(), + None, instruction_data, transaction_accounts, instruction_accounts, @@ -347,7 +345,7 @@ mod tests { fn create_test_account() -> (Pubkey, AccountSharedData) { let rent = Rent::default(); - let balance = VoteState::get_rent_exempt_reserve(&rent); + let balance = rent.minimum_balance(VoteStateV3::size_of()); let vote_pubkey = solana_pubkey::new_rand(); ( vote_pubkey, @@ -359,18 +357,27 @@ mod tests { let vote_pubkey = 
solana_pubkey::new_rand(); let authorized_voter = solana_pubkey::new_rand(); let authorized_withdrawer = solana_pubkey::new_rand(); + let account = + create_test_account_with_provided_authorized(&authorized_voter, &authorized_withdrawer); ( vote_pubkey, authorized_voter, authorized_withdrawer, - vote_state::create_account_with_authorized( - &solana_pubkey::new_rand(), - &authorized_voter, - &authorized_withdrawer, - 0, - 100, - ), + account, + ) + } + + fn create_test_account_with_provided_authorized( + authorized_voter: &Pubkey, + authorized_withdrawer: &Pubkey, + ) -> AccountSharedData { + vote_state::create_account_with_authorized( + &solana_pubkey::new_rand(), + authorized_voter, + authorized_withdrawer, + 0, + 100, ) } @@ -434,7 +441,7 @@ mod tests { let lamports = vote_account.lamports(); let mut vote_account_with_epoch_credits = AccountSharedData::new(lamports, vote_account_space, &id()); - let versioned = VoteStateVersions::new_current(vote_state); + let versioned = VoteStateVersions::new_v3(vote_state); vote_state::to(&versioned, &mut vote_account_with_epoch_credits); (vote_pubkey, vote_account_with_epoch_credits) @@ -473,14 +480,14 @@ mod tests { &[], Vec::new(), Vec::new(), - Err(InstructionError::NotEnoughAccountKeys), + Err(InstructionError::MissingAccount), ); } #[test] fn test_initialize_vote_account() { let vote_pubkey = solana_pubkey::new_rand(); - let vote_account = AccountSharedData::new(100, VoteState::size_of(), &id()); + let vote_account = AccountSharedData::new(100, VoteStateV3::size_of(), &id()); let node_pubkey = solana_pubkey::new_rand(); let node_account = AccountSharedData::default(); let instruction_data = serialize(&VoteInstruction::InitializeAccount(VoteInit { @@ -545,7 +552,7 @@ mod tests { vec![ ( vote_pubkey, - AccountSharedData::new(100, 2 * VoteState::size_of(), &id()), + AccountSharedData::new(100, 2 * VoteStateV3::size_of(), &id()), ), (sysvar::rent::id(), create_default_rent_account()), (sysvar::clock::id(), 
create_default_clock_account()), @@ -608,9 +615,7 @@ mod tests { Err(InstructionError::MissingRequiredSignature), ); instruction_accounts[1].is_signer = true; - let vote_state: VoteState = StateMut::::state(&accounts[0]) - .unwrap() - .convert_to_current(); + let vote_state = VoteStateV3::deserialize(accounts[0].data()).unwrap(); assert_ne!(vote_state.node_pubkey, node_pubkey); // should fail, authorized_withdrawer didn't sign the transaction @@ -622,9 +627,7 @@ mod tests { Err(InstructionError::MissingRequiredSignature), ); instruction_accounts[2].is_signer = true; - let vote_state: VoteState = StateMut::::state(&accounts[0]) - .unwrap() - .convert_to_current(); + let vote_state = VoteStateV3::deserialize(accounts[0].data()).unwrap(); assert_ne!(vote_state.node_pubkey, node_pubkey); // should pass @@ -634,9 +637,7 @@ mod tests { instruction_accounts, Ok(()), ); - let vote_state: VoteState = StateMut::::state(&accounts[0]) - .unwrap() - .convert_to_current(); + let vote_state = VoteStateV3::deserialize(accounts[0].data()).unwrap(); assert_eq!(vote_state.node_pubkey, node_pubkey); } @@ -678,9 +679,7 @@ mod tests { instruction_accounts.clone(), Ok(()), ); - let vote_state: VoteState = StateMut::::state(&accounts[0]) - .unwrap() - .convert_to_current(); + let vote_state = VoteStateV3::deserialize(accounts[0].data()).unwrap(); assert_eq!(vote_state.commission, u8::MAX); // should pass @@ -690,9 +689,7 @@ mod tests { instruction_accounts.clone(), Ok(()), ); - let vote_state: VoteState = StateMut::::state(&accounts[0]) - .unwrap() - .convert_to_current(); + let vote_state = VoteStateV3::deserialize(accounts[0].data()).unwrap(); assert_eq!(vote_state.commission, 42); // should fail, authorized_withdrawer didn't sign the transaction @@ -703,9 +700,7 @@ mod tests { instruction_accounts, Err(InstructionError::MissingRequiredSignature), ); - let vote_state: VoteState = StateMut::::state(&accounts[0]) - .unwrap() - .convert_to_current(); + let vote_state = 
VoteStateV3::deserialize(accounts[0].data()).unwrap(); assert_eq!(vote_state.commission, 0); } @@ -770,9 +765,7 @@ mod tests { }, ); if is_tower_sync { - let vote_state: VoteState = StateMut::::state(&accounts[0]) - .unwrap() - .convert_to_current(); + let vote_state = VoteStateV3::deserialize(accounts[0].data()).unwrap(); assert_eq!( vote_state.votes, vec![vote_state::LandedVote::from(Lockout::new( @@ -823,7 +816,7 @@ mod tests { transaction_accounts[1] = (sysvar::slot_hashes::id(), slot_hashes_account.clone()); // should fail, uninitialized - let vote_account = AccountSharedData::new(100, VoteState::size_of(), &id()); + let vote_account = AccountSharedData::new(100, VoteStateV3::size_of(), &id()); transaction_accounts[0] = (vote_pubkey, vote_account); process_instruction( &instruction_data, @@ -1738,7 +1731,10 @@ mod tests { commission: 0, }, 101, - CreateVoteAccountConfig::default(), + CreateVoteAccountConfig { + space: vote_state::VoteState1_14_11::size_of() as u64, + ..CreateVoteAccountConfig::default() + }, ); // grab the `space` value from SystemInstruction::CreateAccount by directly indexing, for // expediency @@ -1928,7 +1924,7 @@ mod tests { VoteAuthorize::Voter, ); instruction.accounts = instruction.accounts[0..2].to_vec(); - process_instruction_as_one_arg(&instruction, Err(InstructionError::NotEnoughAccountKeys)); + process_instruction_as_one_arg(&instruction, Err(InstructionError::MissingAccount)); let mut instruction = authorize_checked( &vote_pubkey, @@ -1937,7 +1933,7 @@ mod tests { VoteAuthorize::Withdrawer, ); instruction.accounts = instruction.accounts[0..2].to_vec(); - process_instruction_as_one_arg(&instruction, Err(InstructionError::NotEnoughAccountKeys)); + process_instruction_as_one_arg(&instruction, Err(InstructionError::MissingAccount)); // Test with non-signing new_authorized_pubkey let mut instruction = authorize_checked( @@ -1965,10 +1961,13 @@ mod tests { ); // Test with new_authorized_pubkey signer - let vote_account = 
AccountSharedData::new(100, VoteState::size_of(), &id()); + let default_authorized_pubkey = Pubkey::default(); + let vote_account = create_test_account_with_provided_authorized( + &default_authorized_pubkey, + &default_authorized_pubkey, + ); let clock_address = sysvar::clock::id(); let clock_account = account::create_account_shared_data_for_test(&Clock::default()); - let default_authorized_pubkey = Pubkey::default(); let authorized_account = create_default_account(); let new_authorized_account = create_default_account(); let transaction_accounts = vec![ diff --git a/programs/vote/src/vote_state/handler.rs b/programs/vote/src/vote_state/handler.rs new file mode 100644 index 00000000000000..79ab557c6368a0 --- /dev/null +++ b/programs/vote/src/vote_state/handler.rs @@ -0,0 +1,1683 @@ +//! Vote state handler API. +//! +//! Wraps the vote state behind a "handler" API to support converting from an +//! existing vote state version to whichever version is the target (or +//! "current") vote state version. +//! +//! The program must be generic over whichever vote state version is the +//! target, since at compile time the target version is not known (can be +//! changed with a feature gate). For this reason, the handler offers a +//! getter and setter API around vote state, for all operations required by the +//! vote program. + +use { + solana_clock::{Clock, Epoch, Slot, UnixTimestamp}, + solana_instruction::error::InstructionError, + solana_pubkey::Pubkey, + solana_transaction_context::BorrowedInstructionAccount, + solana_vote_interface::{ + authorized_voters::AuthorizedVoters, + error::VoteError, + state::{ + BlockTimestamp, LandedVote, Lockout, VoteInit, VoteState1_14_11, VoteStateV3, + VoteStateV4, VoteStateVersions, BLS_PUBLIC_KEY_COMPRESSED_SIZE, + MAX_EPOCH_CREDITS_HISTORY, MAX_LOCKOUT_HISTORY, VOTE_CREDITS_GRACE_SLOTS, + VOTE_CREDITS_MAXIMUM_PER_SLOT, + }, + }, + std::collections::VecDeque, +}; + +/// Trait defining the interface for vote state operations. 
+pub trait VoteStateHandle { + fn is_uninitialized(&self) -> bool; + + fn authorized_withdrawer(&self) -> &Pubkey; + + fn set_authorized_withdrawer(&mut self, authorized_withdrawer: Pubkey); + + fn authorized_voters(&self) -> &AuthorizedVoters; + + fn set_new_authorized_voter( + &mut self, + authorized_pubkey: &Pubkey, + current_epoch: Epoch, + target_epoch: Epoch, + verify: F, + ) -> Result<(), InstructionError> + where + F: Fn(Pubkey) -> Result<(), InstructionError>; + + fn get_and_update_authorized_voter( + &mut self, + current_epoch: Epoch, + ) -> Result; + + fn commission(&self) -> u8; + + fn set_commission(&mut self, commission: u8); + + fn node_pubkey(&self) -> &Pubkey; + + fn set_node_pubkey(&mut self, node_pubkey: Pubkey); + + fn votes(&self) -> &VecDeque; + + fn votes_mut(&mut self) -> &mut VecDeque; + + fn set_votes(&mut self, votes: VecDeque); + + /// Returns if the vote state contains a vote for the slot `candidate_slot` + fn contains_slot(&self, candidate_slot: Slot) -> bool; + + fn last_lockout(&self) -> Option<&Lockout>; + + fn last_voted_slot(&self) -> Option; + + fn root_slot(&self) -> Option; + + fn set_root_slot(&mut self, root_slot: Option); + + fn current_epoch(&self) -> Epoch; + + fn epoch_credits(&self) -> &Vec<(Epoch, u64, u64)>; + + fn epoch_credits_mut(&mut self) -> &mut Vec<(Epoch, u64, u64)>; + + fn last_timestamp(&self) -> &BlockTimestamp; + + fn set_last_timestamp(&mut self, timestamp: BlockTimestamp); + + fn set_vote_account_state( + self, + vote_account: &mut BorrowedInstructionAccount, + ) -> Result<(), InstructionError>; + + fn credits_for_vote_at_index(&self, index: usize) -> u64 { + let latency = self + .votes() + .get(index) + .map_or(0, |landed_vote| landed_vote.latency); + + // If latency is 0, this means that the Lockout was created and stored from a software version that did not + // store vote latencies; in this case, 1 credit is awarded + if latency == 0 { + 1 + } else { + match 
latency.checked_sub(VOTE_CREDITS_GRACE_SLOTS) { + None | Some(0) => { + // latency was <= VOTE_CREDITS_GRACE_SLOTS, so maximum credits are awarded + VOTE_CREDITS_MAXIMUM_PER_SLOT as u64 + } + + Some(diff) => { + // diff = latency - VOTE_CREDITS_GRACE_SLOTS, and diff > 0 + // Subtract diff from VOTE_CREDITS_MAXIMUM_PER_SLOT which is the number of credits to award + match VOTE_CREDITS_MAXIMUM_PER_SLOT.checked_sub(diff) { + // If diff >= VOTE_CREDITS_MAXIMUM_PER_SLOT, 1 credit is awarded + None | Some(0) => 1, + + Some(credits) => credits as u64, + } + } + } + } + } + + fn increment_credits(&mut self, epoch: Epoch, credits: u64) { + // increment credits, record by epoch + + // never seen a credit + if self.epoch_credits().is_empty() { + self.epoch_credits_mut().push((epoch, 0, 0)); + } else if epoch != self.epoch_credits().last().unwrap().0 { + let (_, credits, prev_credits) = *self.epoch_credits().last().unwrap(); + + if credits != prev_credits { + // if credits were earned previous epoch + // append entry at end of list for the new epoch + self.epoch_credits_mut().push((epoch, credits, credits)); + } else { + // else just move the current epoch + self.epoch_credits_mut().last_mut().unwrap().0 = epoch; + } + + // Remove too old epoch_credits + if self.epoch_credits().len() > MAX_EPOCH_CREDITS_HISTORY { + self.epoch_credits_mut().remove(0); + } + } + + self.epoch_credits_mut().last_mut().unwrap().1 = self + .epoch_credits() + .last() + .unwrap() + .1 + .saturating_add(credits); + } + + fn process_timestamp(&mut self, slot: Slot, timestamp: UnixTimestamp) -> Result<(), VoteError> { + let last_timestamp = self.last_timestamp(); + if (slot < last_timestamp.slot || timestamp < last_timestamp.timestamp) + || (slot == last_timestamp.slot + && &BlockTimestamp { slot, timestamp } != last_timestamp + && last_timestamp.slot != 0) + { + return Err(VoteError::TimestampTooOld); + } + self.set_last_timestamp(BlockTimestamp { slot, timestamp }); + Ok(()) + } + + fn 
pop_expired_votes(&mut self, next_vote_slot: Slot) { + while let Some(vote) = self.last_lockout() { + if !vote.is_locked_out_at_slot(next_vote_slot) { + self.votes_mut().pop_back(); + } else { + break; + } + } + } + + fn double_lockouts(&mut self) { + let stack_depth = self.votes().len(); + for (i, v) in self.votes_mut().iter_mut().enumerate() { + // Don't increase the lockout for this vote until we get more confirmations + // than the max number of confirmations this vote has seen + if stack_depth + > i.checked_add(v.confirmation_count() as usize).expect( + "`confirmation_count` and tower_size should be bounded by \ + `MAX_LOCKOUT_HISTORY`", + ) + { + v.lockout.increase_confirmation_count(1); + } + } + } + + fn process_next_vote_slot(&mut self, next_vote_slot: Slot, epoch: Epoch, current_slot: Slot) { + // Ignore votes for slots earlier than we already have votes for + if self + .last_voted_slot() + .is_some_and(|last_voted_slot| next_vote_slot <= last_voted_slot) + { + return; + } + + self.pop_expired_votes(next_vote_slot); + + let landed_vote = LandedVote { + latency: compute_vote_latency(next_vote_slot, current_slot), + lockout: Lockout::new(next_vote_slot), + }; + + // Once the stack is full, pop the oldest lockout and distribute rewards + if self.votes().len() == MAX_LOCKOUT_HISTORY { + let credits = self.credits_for_vote_at_index(0); + let landed_vote = self.votes_mut().pop_front().unwrap(); + self.set_root_slot(Some(landed_vote.slot())); + + self.increment_credits(epoch, credits); + } + self.votes_mut().push_back(landed_vote); + self.double_lockouts(); + } + + #[cfg(test)] + fn credits(&self) -> u64 { + if self.epoch_credits().is_empty() { + 0 + } else { + self.epoch_credits().last().unwrap().1 + } + } +} + +impl VoteStateHandle for VoteStateV3 { + fn is_uninitialized(&self) -> bool { + self.authorized_voters.is_empty() + } + + fn authorized_withdrawer(&self) -> &Pubkey { + &self.authorized_withdrawer + } + + fn set_authorized_withdrawer(&mut self, 
authorized_withdrawer: Pubkey) { + self.authorized_withdrawer = authorized_withdrawer; + } + + fn authorized_voters(&self) -> &AuthorizedVoters { + &self.authorized_voters + } + + fn set_new_authorized_voter( + &mut self, + authorized_pubkey: &Pubkey, + current_epoch: Epoch, + target_epoch: Epoch, + verify: F, + ) -> Result<(), InstructionError> + where + F: Fn(Pubkey) -> Result<(), InstructionError>, + { + let epoch_authorized_voter = self.get_and_update_authorized_voter(current_epoch)?; + verify(epoch_authorized_voter)?; + + // The offset in slots `n` on which the target_epoch + // (default value `DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET`) is + // calculated is the number of slots available from the + // first slot `S` of an epoch in which to set a new voter for + // the epoch at `S` + `n` + if self.authorized_voters.contains(target_epoch) { + return Err(VoteError::TooSoonToReauthorize.into()); + } + + // Get the latest authorized_voter + let (latest_epoch, latest_authorized_pubkey) = self + .authorized_voters + .last() + .ok_or(InstructionError::InvalidAccountData)?; + + // If we're not setting the same pubkey as authorized pubkey again, + // then update the list of prior voters to mark the expiration + // of the old authorized pubkey + if latest_authorized_pubkey != authorized_pubkey { + // Update the epoch ranges of authorized pubkeys that will be expired + let epoch_of_last_authorized_switch = + self.prior_voters.last().map(|range| range.2).unwrap_or(0); + + // target_epoch must: + // 1) Be monotonically increasing due to the clock always + // moving forward + // 2) not be equal to latest epoch otherwise this + // function would have returned TooSoonToReauthorize error + // above + if target_epoch <= *latest_epoch { + return Err(InstructionError::InvalidAccountData); + } + + // Commit the new state + self.prior_voters.append(( + *latest_authorized_pubkey, + epoch_of_last_authorized_switch, + target_epoch, + )); + } + + self.authorized_voters + 
.insert(target_epoch, *authorized_pubkey); + + Ok(()) + } + + fn get_and_update_authorized_voter( + &mut self, + current_epoch: Epoch, + ) -> Result { + let pubkey = self + .authorized_voters + .get_and_cache_authorized_voter_for_epoch(current_epoch) + .ok_or(InstructionError::InvalidAccountData)?; + self.authorized_voters + .purge_authorized_voters(current_epoch); + Ok(pubkey) + } + + fn commission(&self) -> u8 { + self.commission + } + + fn set_commission(&mut self, commission: u8) { + self.commission = commission; + } + + fn node_pubkey(&self) -> &Pubkey { + &self.node_pubkey + } + + fn set_node_pubkey(&mut self, node_pubkey: Pubkey) { + self.node_pubkey = node_pubkey; + } + + fn votes(&self) -> &VecDeque { + &self.votes + } + + fn votes_mut(&mut self) -> &mut VecDeque { + &mut self.votes + } + + fn set_votes(&mut self, votes: VecDeque) { + self.votes = votes; + } + + fn contains_slot(&self, candidate_slot: Slot) -> bool { + self.votes + .binary_search_by(|vote| vote.slot().cmp(&candidate_slot)) + .is_ok() + } + + fn last_lockout(&self) -> Option<&Lockout> { + self.votes.back().map(|vote| &vote.lockout) + } + + fn last_voted_slot(&self) -> Option { + self.last_lockout().map(|v| v.slot()) + } + + fn root_slot(&self) -> Option { + self.root_slot + } + + fn set_root_slot(&mut self, root_slot: Option) { + self.root_slot = root_slot; + } + + fn current_epoch(&self) -> Epoch { + if self.epoch_credits.is_empty() { + 0 + } else { + self.epoch_credits.last().unwrap().0 + } + } + + fn epoch_credits(&self) -> &Vec<(Epoch, u64, u64)> { + &self.epoch_credits + } + + fn epoch_credits_mut(&mut self) -> &mut Vec<(Epoch, u64, u64)> { + &mut self.epoch_credits + } + + fn last_timestamp(&self) -> &BlockTimestamp { + &self.last_timestamp + } + + fn set_last_timestamp(&mut self, timestamp: BlockTimestamp) { + self.last_timestamp = timestamp; + } + + fn set_vote_account_state( + self, + vote_account: &mut BorrowedInstructionAccount, + ) -> Result<(), InstructionError> { + // If the 
account is not large enough to store the vote state, then attempt a realloc to make it large enough. + // The realloc can only proceed if the vote account has balance sufficient for rent exemption at the new size. + if (vote_account.get_data().len() < VoteStateV3::size_of()) + && (!vote_account.is_rent_exempt_at_data_length(VoteStateV3::size_of()) + || vote_account + .set_data_length(VoteStateV3::size_of()) + .is_err()) + { + // Account cannot be resized to the size of a vote state as it will not be rent exempt, or failed to be + // resized for other reasons. So store the V1_14_11 version. + return vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( + VoteState1_14_11::from(self), + ))); + } + // Vote account is large enough to store the newest version of vote state + vote_account.set_state(&VoteStateVersions::V3(Box::new(self))) + } +} + +impl VoteStateHandle for VoteStateV4 { + fn is_uninitialized(&self) -> bool { + // As per SIMD-0185, v4 is always initialized. + false + } + + fn authorized_withdrawer(&self) -> &Pubkey { + &self.authorized_withdrawer + } + + fn set_authorized_withdrawer(&mut self, authorized_withdrawer: Pubkey) { + self.authorized_withdrawer = authorized_withdrawer; + } + + fn authorized_voters(&self) -> &AuthorizedVoters { + &self.authorized_voters + } + + fn set_new_authorized_voter( + &mut self, + authorized_pubkey: &Pubkey, + current_epoch: Epoch, + target_epoch: Epoch, + verify: F, + ) -> Result<(), InstructionError> + where + F: Fn(Pubkey) -> Result<(), InstructionError>, + { + // Similar to the v3 implementation, but with no `prior_voters` field. 
+ + let epoch_authorized_voter = self.get_and_update_authorized_voter(current_epoch)?; + verify(epoch_authorized_voter)?; + + // The offset in slots `n` on which the target_epoch + // (default value `DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET`) is + // calculated is the number of slots available from the + // first slot `S` of an epoch in which to set a new voter for + // the epoch at `S` + `n` + if self.authorized_voters.contains(target_epoch) { + return Err(VoteError::TooSoonToReauthorize.into()); + } + + self.authorized_voters + .insert(target_epoch, *authorized_pubkey); + + Ok(()) + } + + fn get_and_update_authorized_voter( + &mut self, + current_epoch: Epoch, + ) -> Result { + let pubkey = self + .authorized_voters + .get_and_cache_authorized_voter_for_epoch(current_epoch) + .ok_or(InstructionError::InvalidAccountData)?; + // Per SIMD-0185, v4 retains voters for `current_epoch - 1` through + // `current_epoch + 2`. Only purge entries for epochs less than + // `current_epoch - 1`. + self.authorized_voters + .purge_authorized_voters(current_epoch.saturating_sub(1)); + Ok(pubkey) + } + + fn commission(&self) -> u8 { + (self.inflation_rewards_commission_bps / 100) as u8 + } + + #[allow(clippy::arithmetic_side_effects)] + fn set_commission(&mut self, commission: u8) { + // Safety: u16::MAX > u8::MAX * 100 + self.inflation_rewards_commission_bps = (commission as u16) * 100; + } + + fn node_pubkey(&self) -> &Pubkey { + &self.node_pubkey + } + + fn set_node_pubkey(&mut self, node_pubkey: Pubkey) { + self.node_pubkey = node_pubkey; + } + + fn votes(&self) -> &VecDeque { + &self.votes + } + + fn votes_mut(&mut self) -> &mut VecDeque { + &mut self.votes + } + + fn set_votes(&mut self, votes: VecDeque) { + self.votes = votes; + } + + fn contains_slot(&self, candidate_slot: Slot) -> bool { + self.votes + .binary_search_by(|vote| vote.slot().cmp(&candidate_slot)) + .is_ok() + } + + fn last_lockout(&self) -> Option<&Lockout> { + self.votes.back().map(|vote| &vote.lockout) + } + + 
fn last_voted_slot(&self) -> Option { + self.last_lockout().map(|v| v.slot()) + } + + fn root_slot(&self) -> Option { + self.root_slot + } + + fn set_root_slot(&mut self, root_slot: Option) { + self.root_slot = root_slot; + } + + fn current_epoch(&self) -> Epoch { + if self.epoch_credits.is_empty() { + 0 + } else { + self.epoch_credits.last().unwrap().0 + } + } + + fn epoch_credits(&self) -> &Vec<(Epoch, u64, u64)> { + &self.epoch_credits + } + + fn epoch_credits_mut(&mut self) -> &mut Vec<(Epoch, u64, u64)> { + &mut self.epoch_credits + } + + fn last_timestamp(&self) -> &BlockTimestamp { + &self.last_timestamp + } + + fn set_last_timestamp(&mut self, timestamp: BlockTimestamp) { + self.last_timestamp = timestamp; + } + + fn set_vote_account_state( + self, + vote_account: &mut BorrowedInstructionAccount, + ) -> Result<(), InstructionError> { + // If the account is not large enough to store the vote state, then attempt a realloc to make it large enough. + // The realloc can only proceed if the vote account has balance sufficient for rent exemption at the new size. + if (vote_account.get_data().len() < VoteStateV4::size_of()) + && (!vote_account.is_rent_exempt_at_data_length(VoteStateV4::size_of()) + || vote_account + .set_data_length(VoteStateV4::size_of()) + .is_err()) + { + // Unlike with conversions to v3, we will not gracefully default to + // storing a v1_14_11. Instead, throw an error, as per SIMD-0185. + return Err(InstructionError::AccountNotRentExempt); + } + // Vote account is large enough to store the newest version of vote state + vote_account.set_state(&VoteStateVersions::V4(Box::new(self))) + } +} + +/// Default block revenue commission rate in basis points (100%) per SIMD-0185. +#[cfg(test)] // Test-only for now, until later commits. +const DEFAULT_BLOCK_REVENUE_COMMISSION_BPS: u16 = 10_000; + +/// Create a new VoteStateV4 from `VoteInit` with proper SIMD-0185 defaults. +/// Note this is a temporary substitute for `VoteStateV4::new`. 
+#[allow(clippy::arithmetic_side_effects)] +#[cfg(test)] // Test-only for now, until later commits. +pub(crate) fn create_new_vote_state_v4( + vote_pubkey: &Pubkey, + vote_init: &VoteInit, + clock: &Clock, +) -> VoteStateV4 { + VoteStateV4 { + node_pubkey: vote_init.node_pubkey, + authorized_voters: AuthorizedVoters::new(clock.epoch, vote_init.authorized_voter), + authorized_withdrawer: vote_init.authorized_withdrawer, + inflation_rewards_commission_bps: (vote_init.commission as u16) * 100, // u16::MAX > u8::MAX * 100 + // Per SIMD-0185, set default collectors and commission + inflation_rewards_collector: *vote_pubkey, + block_revenue_collector: vote_init.node_pubkey, + block_revenue_commission_bps: DEFAULT_BLOCK_REVENUE_COMMISSION_BPS, + ..VoteStateV4::default() + } +} + +/// (Alpenglow) Create a test-only `VoteStateV4` with the provided values. +pub(crate) fn create_new_vote_state_v4_for_tests( + node_pubkey: &Pubkey, + authorized_voter: &Pubkey, + authorized_withdrawer: &Pubkey, + bls_pubkey_compressed: Option<[u8; BLS_PUBLIC_KEY_COMPRESSED_SIZE]>, + inflation_rewards_commission_bps: u16, +) -> VoteStateV4 { + VoteStateV4 { + node_pubkey: *node_pubkey, + authorized_voters: AuthorizedVoters::new(0, *authorized_voter), + authorized_withdrawer: *authorized_withdrawer, + bls_pubkey_compressed, + inflation_rewards_commission_bps, + ..VoteStateV4::default() + } +} + +/// The target version to convert all deserialized vote state into. +pub enum VoteStateTargetVersion { + V3, + // New vote state versions will be added here... +} + +#[derive(Clone, Debug, PartialEq)] +enum TargetVoteState { + V3(VoteStateV3), + // New vote state versions will be added here... 
+} + +/// Vote state handler for +/// * Deserializing vote state +/// * Converting vote state in-memory to target version +/// * Operating on the vote state data agnostically +/// * Serializing the resulting state to the vote account +#[derive(Clone, Debug, PartialEq)] +pub struct VoteStateHandler { + target_state: TargetVoteState, +} + +impl VoteStateHandle for VoteStateHandler { + fn is_uninitialized(&self) -> bool { + match &self.target_state { + TargetVoteState::V3(v3) => v3.is_uninitialized(), + } + } + + fn authorized_withdrawer(&self) -> &Pubkey { + match &self.target_state { + TargetVoteState::V3(v3) => v3.authorized_withdrawer(), + } + } + + fn set_authorized_withdrawer(&mut self, authorized_withdrawer: Pubkey) { + match &mut self.target_state { + TargetVoteState::V3(v3) => v3.set_authorized_withdrawer(authorized_withdrawer), + } + } + + fn authorized_voters(&self) -> &AuthorizedVoters { + match &self.target_state { + TargetVoteState::V3(v3) => v3.authorized_voters(), + } + } + + fn set_new_authorized_voter( + &mut self, + authorized_pubkey: &Pubkey, + current_epoch: Epoch, + target_epoch: Epoch, + verify: F, + ) -> Result<(), InstructionError> + where + F: Fn(Pubkey) -> Result<(), InstructionError>, + { + match &mut self.target_state { + TargetVoteState::V3(v3) => { + v3.set_new_authorized_voter(authorized_pubkey, current_epoch, target_epoch, verify) + } + } + } + + fn get_and_update_authorized_voter( + &mut self, + current_epoch: Epoch, + ) -> Result { + match &mut self.target_state { + TargetVoteState::V3(v3) => v3.get_and_update_authorized_voter(current_epoch), + } + } + + fn commission(&self) -> u8 { + match &self.target_state { + TargetVoteState::V3(v3) => v3.commission(), + } + } + + fn set_commission(&mut self, commission: u8) { + match &mut self.target_state { + TargetVoteState::V3(v3) => v3.set_commission(commission), + } + } + + fn node_pubkey(&self) -> &Pubkey { + match &self.target_state { + TargetVoteState::V3(v3) => v3.node_pubkey(), + } + } 
+ + fn set_node_pubkey(&mut self, node_pubkey: Pubkey) { + match &mut self.target_state { + TargetVoteState::V3(v3) => v3.set_node_pubkey(node_pubkey), + } + } + + fn votes(&self) -> &VecDeque { + match &self.target_state { + TargetVoteState::V3(v3) => v3.votes(), + } + } + + fn votes_mut(&mut self) -> &mut VecDeque { + match &mut self.target_state { + TargetVoteState::V3(v3) => v3.votes_mut(), + } + } + + fn set_votes(&mut self, votes: VecDeque) { + match &mut self.target_state { + TargetVoteState::V3(v3) => v3.set_votes(votes), + } + } + + fn contains_slot(&self, candidate_slot: Slot) -> bool { + match &self.target_state { + TargetVoteState::V3(v3) => v3.contains_slot(candidate_slot), + } + } + + fn last_lockout(&self) -> Option<&Lockout> { + match &self.target_state { + TargetVoteState::V3(v3) => v3.last_lockout(), + } + } + + fn last_voted_slot(&self) -> Option { + match &self.target_state { + TargetVoteState::V3(v3) => v3.last_voted_slot(), + } + } + + fn root_slot(&self) -> Option { + match &self.target_state { + TargetVoteState::V3(v3) => v3.root_slot(), + } + } + + fn set_root_slot(&mut self, root_slot: Option) { + match &mut self.target_state { + TargetVoteState::V3(v3) => v3.set_root_slot(root_slot), + } + } + + fn current_epoch(&self) -> Epoch { + match &self.target_state { + TargetVoteState::V3(v3) => v3.current_epoch(), + } + } + + fn epoch_credits(&self) -> &Vec<(Epoch, u64, u64)> { + match &self.target_state { + TargetVoteState::V3(v3) => v3.epoch_credits(), + } + } + + fn epoch_credits_mut(&mut self) -> &mut Vec<(Epoch, u64, u64)> { + match &mut self.target_state { + TargetVoteState::V3(v3) => v3.epoch_credits_mut(), + } + } + + fn last_timestamp(&self) -> &BlockTimestamp { + match &self.target_state { + TargetVoteState::V3(v3) => v3.last_timestamp(), + } + } + + fn set_last_timestamp(&mut self, timestamp: BlockTimestamp) { + match &mut self.target_state { + TargetVoteState::V3(v3) => v3.set_last_timestamp(timestamp), + } + } + + fn 
set_vote_account_state( + self, + vote_account: &mut BorrowedInstructionAccount, + ) -> Result<(), InstructionError> { + match self.target_state { + TargetVoteState::V3(v3) => v3.set_vote_account_state(vote_account), + } + } +} + +impl VoteStateHandler { + /// Create a new handler for the provided target version by deserializing + /// the vote state and converting it to the target. + pub fn deserialize_and_convert( + vote_account: &BorrowedInstructionAccount, + target_version: VoteStateTargetVersion, + ) -> Result { + let target_state = match target_version { + VoteStateTargetVersion::V3 => { + let vote_state = VoteStateV3::deserialize(vote_account.get_data())?; + TargetVoteState::V3(vote_state) + } + }; + Ok(Self { target_state }) + } + + pub fn init_vote_account_state( + vote_account: &mut BorrowedInstructionAccount, + vote_init: &VoteInit, + clock: &Clock, + target_version: VoteStateTargetVersion, + ) -> Result<(), InstructionError> { + match target_version { + VoteStateTargetVersion::V3 => { + VoteStateV3::new(vote_init, clock).set_vote_account_state(vote_account) + } + } + } + + pub fn deinitialize_vote_account_state( + vote_account: &mut BorrowedInstructionAccount, + target_version: VoteStateTargetVersion, + ) -> Result<(), InstructionError> { + match target_version { + VoteStateTargetVersion::V3 => { + VoteStateV3::default().set_vote_account_state(vote_account) + } + } + } + + pub fn check_vote_account_length( + vote_account: &mut BorrowedInstructionAccount, + target_version: VoteStateTargetVersion, + ) -> Result<(), InstructionError> { + let length = vote_account.get_data().len(); + let expected = match target_version { + VoteStateTargetVersion::V3 => VoteStateV3::size_of(), + }; + if length != expected { + Err(InstructionError::InvalidAccountData) + } else { + Ok(()) + } + } + + #[cfg(test)] + pub fn new_v3(vote_state: VoteStateV3) -> Self { + Self { + target_state: TargetVoteState::V3(vote_state), + } + } + + #[cfg(test)] + pub fn default_v3() -> Self { + 
Self::new_v3(VoteStateV3::default()) + } + + #[cfg(test)] + pub fn epoch_credits(&self) -> &Vec<(Epoch, u64, u64)> { + match &self.target_state { + TargetVoteState::V3(v3) => &v3.epoch_credits, + } + } + + #[cfg(test)] + pub fn nth_recent_lockout(&self, position: usize) -> Option<&Lockout> { + match &self.target_state { + TargetVoteState::V3(v3) => { + if position < v3.votes.len() { + let pos = v3 + .votes + .len() + .checked_sub(position) + .and_then(|pos| pos.checked_sub(1))?; + v3.votes.get(pos).map(|vote| &vote.lockout) + } else { + None + } + } + } + } +} + +// Computes the vote latency for vote on voted_for_slot where the vote itself landed in current_slot +pub(crate) fn compute_vote_latency(voted_for_slot: Slot, current_slot: Slot) -> u8 { + std::cmp::min(current_slot.saturating_sub(voted_for_slot), u8::MAX as u64) as u8 +} + +#[allow(clippy::arithmetic_side_effects)] +#[allow(clippy::type_complexity)] +#[cfg(test)] +mod tests { + use { + super::*, + crate::id, + solana_account::AccountSharedData, + solana_clock::Clock, + solana_epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, + solana_pubkey::Pubkey, + solana_rent::Rent, + solana_sdk_ids::native_loader, + solana_transaction_context::{InstructionAccount, TransactionContext}, + solana_vote_interface::{ + authorized_voters::AuthorizedVoters, + state::{BlockTimestamp, VoteInit, MAX_EPOCH_CREDITS_HISTORY, MAX_LOCKOUT_HISTORY}, + }, + test_case::test_case, + }; + + fn mock_transaction_context( + vote_pubkey: Pubkey, + vote_account: AccountSharedData, + rent: Rent, + ) -> TransactionContext { + let program_account = AccountSharedData::new(0, 0, &native_loader::id()); + let mut transaction_context = TransactionContext::new( + vec![(id(), program_account), (vote_pubkey, vote_account)], + rent, + 0, + 0, + ); + transaction_context + .configure_next_instruction_for_tests( + 0, + vec![InstructionAccount::new(1, false, true)], + &[], + ) + .unwrap(); + transaction_context + } + + fn get_max_sized_vote_state_v3() -> 
VoteStateV3 { + let mut authorized_voters = AuthorizedVoters::default(); + for i in 0..=MAX_LEADER_SCHEDULE_EPOCH_OFFSET { + authorized_voters.insert(i, Pubkey::new_unique()); + } + + VoteStateV3 { + votes: VecDeque::from(vec![LandedVote::default(); MAX_LOCKOUT_HISTORY]), + root_slot: Some(u64::MAX), + epoch_credits: vec![(0, 0, 0); MAX_EPOCH_CREDITS_HISTORY], + authorized_voters, + ..Default::default() + } + } + + fn get_max_sized_vote_state_v4() -> VoteStateV4 { + let mut authorized_voters = AuthorizedVoters::default(); + for i in 0..=MAX_LEADER_SCHEDULE_EPOCH_OFFSET { + authorized_voters.insert(i, Pubkey::new_unique()); + } + + VoteStateV4 { + votes: VecDeque::from(vec![LandedVote::default(); MAX_LOCKOUT_HISTORY]), + root_slot: Some(u64::MAX), + epoch_credits: vec![(0, 0, 0); MAX_EPOCH_CREDITS_HISTORY], + authorized_voters, + bls_pubkey_compressed: Some([255; BLS_PUBLIC_KEY_COMPRESSED_SIZE]), + ..Default::default() + } + } + + fn set_new_authorized_voter_and_assert( + vote_state: &mut T, + original_voter: Pubkey, + epoch_offset: Epoch, + prior_voters_last_callback: Option &(Pubkey, Epoch, Epoch)>, + ) { + let new_voter = Pubkey::new_unique(); + // Set a new authorized voter + vote_state + .set_new_authorized_voter(&new_voter, 0, epoch_offset, |_| Ok(())) + .unwrap(); + + if let Some(prior_voters_last) = prior_voters_last_callback { + assert_eq!( + prior_voters_last(vote_state), + &(original_voter, 0, epoch_offset), + ); + } + + // Trying to set authorized voter for same epoch again should fail + assert_eq!( + vote_state.set_new_authorized_voter(&new_voter, 0, epoch_offset, |_| Ok(())), + Err(VoteError::TooSoonToReauthorize.into()) + ); + + // Setting the same authorized voter again should succeed + vote_state + .set_new_authorized_voter(&new_voter, 2, 2 + epoch_offset, |_| Ok(())) + .unwrap(); + + // Set a third and fourth authorized voter + let new_voter2 = Pubkey::new_unique(); + vote_state + .set_new_authorized_voter(&new_voter2, 3, 3 + epoch_offset, |_| 
Ok(())) + .unwrap(); + if let Some(prior_voters_last) = prior_voters_last_callback { + assert_eq!( + prior_voters_last(vote_state), + &(new_voter, epoch_offset, 3 + epoch_offset), + ); + } + + let new_voter3 = Pubkey::new_unique(); + vote_state + .set_new_authorized_voter(&new_voter3, 6, 6 + epoch_offset, |_| Ok(())) + .unwrap(); + if let Some(prior_voters_last) = prior_voters_last_callback { + assert_eq!( + prior_voters_last(vote_state), + &(new_voter2, 3 + epoch_offset, 6 + epoch_offset), + ); + } + + // Check can set back to original voter + vote_state + .set_new_authorized_voter(&original_voter, 9, 9 + epoch_offset, |_| Ok(())) + .unwrap(); + + // Run with these voters for a while, check the ranges of authorized + // voters is correct + for i in 9..epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + original_voter + ); + } + for i in epoch_offset..3 + epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + new_voter + ); + } + for i in 3 + epoch_offset..6 + epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + new_voter2 + ); + } + for i in 6 + epoch_offset..9 + epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + new_voter3 + ); + } + for i in 9 + epoch_offset..=10 + epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + original_voter + ); + } + } + + #[test] + fn test_set_new_authorized_voter() { + let vote_pubkey = Pubkey::new_unique(); + let original_voter = Pubkey::new_unique(); + let epoch_offset = 15; + + let vote_init = VoteInit { + node_pubkey: original_voter, + authorized_voter: original_voter, + authorized_withdrawer: original_voter, + commission: 0, + }; + let clock = Clock::default(); + + // Start with v3. We'll also check `prior_voters`. 
+ let mut vote_state = VoteStateV3::new(&vote_init, &clock); + assert!(vote_state.prior_voters.last().is_none()); + + set_new_authorized_voter_and_assert( + &mut vote_state, + original_voter, + epoch_offset, + Some(|vote_state: &VoteStateV3| vote_state.prior_voters.last().unwrap()), + ); + + // Now try with v4. No `prior_voters` to check. + let mut vote_state = create_new_vote_state_v4(&vote_pubkey, &vote_init, &clock); + + set_new_authorized_voter_and_assert(&mut vote_state, original_voter, epoch_offset, None); + } + + fn assert_authorized_voter_is_locked_within_epoch( + vote_state: &mut T, + original_voter: &Pubkey, + ) { + // Test that it's not possible to set a new authorized + // voter within the same epoch, even if none has been + // explicitly set before + let new_voter = Pubkey::new_unique(); + assert_eq!( + vote_state.set_new_authorized_voter(&new_voter, 1, 1, |_| Ok(())), + Err(VoteError::TooSoonToReauthorize.into()) + ); + assert_eq!( + vote_state.authorized_voters().get_authorized_voter(1), + Some(*original_voter) + ); + // Set a new authorized voter for a future epoch + assert_eq!( + vote_state.set_new_authorized_voter(&new_voter, 1, 2, |_| Ok(())), + Ok(()) + ); + // Test that it's not possible to set a new authorized + // voter within the same epoch, even if none has been + // explicitly set before + assert_eq!( + vote_state.set_new_authorized_voter(original_voter, 3, 3, |_| Ok(())), + Err(VoteError::TooSoonToReauthorize.into()) + ); + assert_eq!( + vote_state.authorized_voters().get_authorized_voter(3), + Some(new_voter) + ); + } + + #[test] + fn test_authorized_voter_is_locked_within_epoch() { + let vote_pubkey = Pubkey::new_unique(); + let original_voter = Pubkey::new_unique(); + + let vote_init = VoteInit { + node_pubkey: original_voter, + authorized_voter: original_voter, + authorized_withdrawer: original_voter, + commission: 0, + }; + let clock = Clock::default(); + + // First test v3. 
+ let mut vote_state = VoteStateV3::new(&vote_init, &clock); + assert_authorized_voter_is_locked_within_epoch(&mut vote_state, &original_voter); + + // Now v4. + let mut vote_state = create_new_vote_state_v4(&vote_pubkey, &vote_init, &clock); + assert_authorized_voter_is_locked_within_epoch(&mut vote_state, &original_voter); + } + + #[test] + fn test_get_and_update_authorized_voter_v3() { + let original_voter = Pubkey::new_unique(); + let mut vote_state = VoteStateV3::new( + &VoteInit { + node_pubkey: original_voter, + authorized_voter: original_voter, + authorized_withdrawer: original_voter, + commission: 0, + }, + &Clock::default(), + ); + + assert_eq!(vote_state.authorized_voters().len(), 1); + assert_eq!( + *vote_state.authorized_voters().first().unwrap().1, + original_voter + ); + + // If no new authorized voter was set, the same authorized voter + // is locked into the next epoch + assert_eq!( + vote_state.get_and_update_authorized_voter(1).unwrap(), + original_voter + ); + + // Try to get the authorized voter for epoch 5, implies + // the authorized voter for epochs 1-4 were unchanged + assert_eq!( + vote_state.get_and_update_authorized_voter(5).unwrap(), + original_voter + ); + + // Authorized voter for expired epoch 0..5 should have been + // purged and no longer queryable + assert_eq!(vote_state.authorized_voters().len(), 1); + for i in 0..5 { + assert!(vote_state + .authorized_voters() + .get_authorized_voter(i) + .is_none()); + } + + // Set an authorized voter change at slot 7 + let new_authorized_voter = Pubkey::new_unique(); + vote_state + .set_new_authorized_voter(&new_authorized_voter, 5, 7, |_| Ok(())) + .unwrap(); + + // Try to get the authorized voter for epoch 6, unchanged + assert_eq!( + vote_state.get_and_update_authorized_voter(6).unwrap(), + original_voter + ); + + // Try to get the authorized voter for epoch 7 and onwards, should + // be the new authorized voter + for i in 7..10 { + assert_eq!( + 
vote_state.get_and_update_authorized_voter(i).unwrap(), + new_authorized_voter + ); + } + assert_eq!(vote_state.authorized_voters().len(), 1); + } + + // v4 purging retains one extra epoch compared to v3. + // Besides that, the functionality should be the same. + #[test] + fn test_get_and_update_authorized_voter_v4() { + let vote_pubkey = Pubkey::new_unique(); + let original_voter = Pubkey::new_unique(); + let mut vote_state = create_new_vote_state_v4( + &vote_pubkey, + &VoteInit { + node_pubkey: original_voter, + authorized_voter: original_voter, + authorized_withdrawer: original_voter, + commission: 0, + }, + &Clock::default(), + ); + + // Run the same exercise as the v3 test to start. + + assert_eq!(vote_state.authorized_voters().len(), 1); + assert_eq!( + *vote_state.authorized_voters().first().unwrap().1, + original_voter + ); + + // If no new authorized voter was set, the same authorized voter + // is locked into the next epoch + assert_eq!( + vote_state.get_and_update_authorized_voter(1).unwrap(), + original_voter + ); + + // Try to get the authorized voter for epoch 5, implies + // the authorized voter for epochs 1-4 were unchanged + assert_eq!( + vote_state.get_and_update_authorized_voter(5).unwrap(), + original_voter + ); + + // Just like with the v3 tests, authorized voters for epochs 0..5 should + // be purged, but only because we didn't cache an entry for current - 1. + assert_eq!(vote_state.authorized_voters().len(), 1); + for i in 0..5 { + assert!(vote_state + .authorized_voters() + .get_authorized_voter(i) + .is_none()); + } + + // Say we're in epoch 7. Cache entries for both epochs 6 and 7. + assert_eq!( + vote_state.get_and_update_authorized_voter(6).unwrap(), + original_voter + ); + assert_eq!( + vote_state.get_and_update_authorized_voter(7).unwrap(), + original_voter + ); + + // Now we should have length 2. + assert_eq!(vote_state.authorized_voters().len(), 2); + + // 0..=5 should still be purged. 
+ for i in 0..=5 { + assert!(vote_state + .authorized_voters() + .get_authorized_voter(i) + .is_none()); + } + + // Set an authorized voter change at epoch 9. + let new_authorized_voter = Pubkey::new_unique(); + vote_state + .set_new_authorized_voter(&new_authorized_voter, 7, 9, |_| Ok(())) + .unwrap(); + + // Try to get the authorized voter for epoch 8, unchanged + assert_eq!( + vote_state.get_and_update_authorized_voter(8).unwrap(), + original_voter + ); + + // Try to get the authorized voter for epoch 9 and onwards, should + // be the new authorized voter + for i in 9..12 { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + new_authorized_voter + ); + } + assert_eq!(vote_state.authorized_voters().len(), 2); + + // If we skip a few epochs ahead, only the current epoch is retained. + assert_eq!( + vote_state.get_and_update_authorized_voter(15).unwrap(), + new_authorized_voter + ); + assert_eq!(vote_state.authorized_voters().len(), 1); + } + + #[test_case( + VoteStateV3::size_of(), + get_max_sized_vote_state_v3(), + |vote_state, data| { + let versioned = VoteStateVersions::new_v3(vote_state); + VoteStateV3::serialize(&versioned, data).unwrap(); + }; + "VoteStateV3" + )] + #[test_case( + VoteStateV4::size_of(), + get_max_sized_vote_state_v4(), + |vote_state, data| { + let versioned = VoteStateVersions::new_v4(vote_state); + VoteStateV4::serialize(&versioned, data).unwrap(); + }; + "VoteStateV4" + )] + fn test_vote_state_max_size( + max_size: usize, + mut vote_state: T, + verify_serialize: fn(T, &mut [u8]), + ) { + let mut max_sized_data = vec![0; max_size]; + let (start_leader_schedule_epoch, _) = vote_state.authorized_voters().last().unwrap(); + let start_current_epoch = + start_leader_schedule_epoch - MAX_LEADER_SCHEDULE_EPOCH_OFFSET + 1; + + for i in start_current_epoch..start_current_epoch + 2 * MAX_LEADER_SCHEDULE_EPOCH_OFFSET { + vote_state + .set_new_authorized_voter( + &Pubkey::new_unique(), + i, + i + MAX_LEADER_SCHEDULE_EPOCH_OFFSET, 
+ |_| Ok(()), + ) + .unwrap(); + + verify_serialize(vote_state.clone(), &mut max_sized_data); + } + } + + #[test_case(VoteStateV3::default() ; "VoteStateV3")] + #[test_case(VoteStateV4::default() ; "VoteStateV4")] + fn test_vote_state_epoch_credits(mut vote_state: T) { + assert_eq!(vote_state.credits(), 0); + assert_eq!(vote_state.epoch_credits().clone(), vec![]); + + let mut expected = vec![]; + let mut credits = 0; + let epochs = (MAX_EPOCH_CREDITS_HISTORY + 2) as u64; + for epoch in 0..epochs { + for _j in 0..epoch { + vote_state.increment_credits(epoch, 1); + credits += 1; + } + expected.push((epoch, credits, credits - epoch)); + } + + while expected.len() > MAX_EPOCH_CREDITS_HISTORY { + expected.remove(0); + } + + assert_eq!(vote_state.credits(), credits); + assert_eq!(vote_state.epoch_credits().clone(), expected); + } + + #[test_case(VoteStateV3::default() ; "VoteStateV3")] + #[test_case(VoteStateV4::default() ; "VoteStateV4")] + fn test_vote_state_epoch0_no_credits(mut vote_state: T) { + assert_eq!(vote_state.epoch_credits().len(), 0); + vote_state.increment_credits(1, 1); + assert_eq!(vote_state.epoch_credits().len(), 1); + + vote_state.increment_credits(2, 1); + assert_eq!(vote_state.epoch_credits().len(), 2); + } + + #[test_case(VoteStateV3::default() ; "VoteStateV3")] + #[test_case(VoteStateV4::default() ; "VoteStateV4")] + fn test_vote_state_increment_credits(mut vote_state: T) { + let credits = (MAX_EPOCH_CREDITS_HISTORY + 2) as u64; + for i in 0..credits { + vote_state.increment_credits(i, 1); + } + assert_eq!(vote_state.credits(), credits); + assert!(vote_state.epoch_credits().len() <= MAX_EPOCH_CREDITS_HISTORY); + } + + #[test_case(VoteStateV3::default() ; "VoteStateV3")] + #[test_case(VoteStateV4::default() ; "VoteStateV4")] + fn test_vote_process_timestamp(mut vote_state: T) { + let (slot, timestamp) = (15, 1_575_412_285); + vote_state.set_last_timestamp(BlockTimestamp { slot, timestamp }); + + assert_eq!( + vote_state.process_timestamp(slot - 1, 
timestamp + 1), + Err(VoteError::TimestampTooOld) + ); + assert_eq!( + vote_state.last_timestamp(), + &BlockTimestamp { slot, timestamp } + ); + assert_eq!( + vote_state.process_timestamp(slot + 1, timestamp - 1), + Err(VoteError::TimestampTooOld) + ); + assert_eq!( + vote_state.process_timestamp(slot, timestamp + 1), + Err(VoteError::TimestampTooOld) + ); + assert_eq!(vote_state.process_timestamp(slot, timestamp), Ok(())); + assert_eq!( + vote_state.last_timestamp(), + &BlockTimestamp { slot, timestamp } + ); + assert_eq!(vote_state.process_timestamp(slot + 1, timestamp), Ok(())); + assert_eq!( + vote_state.last_timestamp(), + &BlockTimestamp { + slot: slot + 1, + timestamp + } + ); + assert_eq!( + vote_state.process_timestamp(slot + 2, timestamp + 1), + Ok(()) + ); + assert_eq!( + vote_state.last_timestamp(), + &BlockTimestamp { + slot: slot + 2, + timestamp: timestamp + 1 + } + ); + + // Test initial vote + vote_state.set_last_timestamp(BlockTimestamp::default()); + assert_eq!(vote_state.process_timestamp(0, timestamp), Ok(())); + } + + enum ExpectedVoteStateVersion { + V1_14_11, + V3, + } + + fn init_vote_account_state_v3_and_assert( + vote_pubkey: Pubkey, + vote_account: AccountSharedData, + vote_init: &VoteInit, + clock: &Clock, + rent: Rent, + expected_version: ExpectedVoteStateVersion, + ) { + let transaction_context = mock_transaction_context(vote_pubkey, vote_account, rent); + let instruction_context = transaction_context.get_next_instruction_context().unwrap(); + let mut vote_account = instruction_context + .try_borrow_instruction_account(0) + .unwrap(); + + // Initialize. 
+ VoteStateHandler::init_vote_account_state( + &mut vote_account, + vote_init, + clock, + VoteStateTargetVersion::V3, + ) + .unwrap(); + + let vote_state_versions = vote_account.get_state::().unwrap(); + + match expected_version { + ExpectedVoteStateVersion::V1_14_11 => { + assert!(matches!( + vote_state_versions, + VoteStateVersions::V1_14_11(_) + )); + assert!(!vote_state_versions.is_uninitialized()); + + // Verify fields. + if let VoteStateVersions::V1_14_11(v1_14_11) = vote_state_versions { + assert_eq!(v1_14_11.node_pubkey, vote_init.node_pubkey); + assert_eq!( + v1_14_11.authorized_voters.get_authorized_voter(0), + Some(vote_init.authorized_voter) + ); + assert_eq!( + v1_14_11.authorized_withdrawer, + vote_init.authorized_withdrawer + ); + assert_eq!(v1_14_11.commission, vote_init.commission); + } else { + panic!("should be v1_14_11"); + } + } + ExpectedVoteStateVersion::V3 => { + assert!(matches!(vote_state_versions, VoteStateVersions::V3(_))); + assert!(!vote_state_versions.is_uninitialized()); + + // Verify fields. + if let VoteStateVersions::V3(v3) = vote_state_versions { + assert_eq!(v3.node_pubkey, vote_init.node_pubkey); + assert_eq!( + v3.authorized_voters.get_authorized_voter(0), + Some(vote_init.authorized_voter) + ); + assert_eq!(v3.authorized_withdrawer, vote_init.authorized_withdrawer); + assert_eq!(v3.commission, vote_init.commission); + } else { + panic!("should be v3"); + } + } + } + } + + fn deinit_vote_account_state_v3_and_assert( + vote_pubkey: Pubkey, + vote_account: AccountSharedData, + rent: Rent, + expected_version: ExpectedVoteStateVersion, + ) { + let transaction_context = mock_transaction_context(vote_pubkey, vote_account, rent.clone()); + let instruction_context = transaction_context.get_next_instruction_context().unwrap(); + let mut vote_account = instruction_context + .try_borrow_instruction_account(0) + .unwrap(); + + // Deinitialize. 
+ VoteStateHandler::deinitialize_vote_account_state( + &mut vote_account, + VoteStateTargetVersion::V3, + ) + .unwrap(); + + let vote_state_versions = vote_account.get_state::().unwrap(); + + match expected_version { + ExpectedVoteStateVersion::V1_14_11 => { + assert!(matches!( + vote_state_versions, + VoteStateVersions::V1_14_11(_) + )); + assert!(vote_state_versions.is_uninitialized()); + } + ExpectedVoteStateVersion::V3 => { + assert!(matches!(vote_state_versions, VoteStateVersions::V3(_))); + assert!(vote_state_versions.is_uninitialized()); + } + } + } + + #[test] + fn test_init_vote_account_state_v3() { + let vote_pubkey = Pubkey::new_unique(); + let vote_init = VoteInit { + node_pubkey: Pubkey::new_unique(), + authorized_voter: Pubkey::new_unique(), + authorized_withdrawer: Pubkey::new_unique(), + commission: 5, + }; + let clock = Clock::default(); + let rent = Rent::default(); + + // First create a vote account that's too small for v3. + let v1_14_11_size = VoteState1_14_11::size_of(); + let lamports = rent.minimum_balance(v1_14_11_size); + let vote_account = AccountSharedData::new(lamports, v1_14_11_size, &id()); + + // Initialize - should default to v1_14_11. + init_vote_account_state_v3_and_assert( + vote_pubkey, + vote_account, + &vote_init, + &clock, + rent.clone(), + ExpectedVoteStateVersion::V1_14_11, + ); + + // Create a vote account that's too small for v3, but has enough + // lamports for resize. + let v3_size = VoteStateV3::size_of(); + let lamports = rent.minimum_balance(v3_size); + let vote_account = AccountSharedData::new(lamports, v1_14_11_size, &id()); + + // Initialize - should resize and create v3. + init_vote_account_state_v3_and_assert( + vote_pubkey, + vote_account, + &vote_init, + &clock, + rent.clone(), + ExpectedVoteStateVersion::V3, + ); + + // Now create a vote account that's large enough for v3. 
+ let lamports = rent.minimum_balance(v3_size); + let vote_account = AccountSharedData::new(lamports, v3_size, &id()); + + // Initialize - should create v3. + init_vote_account_state_v3_and_assert( + vote_pubkey, + vote_account, + &vote_init, + &clock, + rent, + ExpectedVoteStateVersion::V3, + ); + } + + #[test] + fn test_deinitialize_vote_account_state_v3() { + let vote_pubkey = Pubkey::new_unique(); + let rent = Rent::default(); + + // First create a vote account that's too small for v3. + let v1_14_11_size = VoteState1_14_11::size_of(); + let lamports = rent.minimum_balance(v1_14_11_size); + let vote_account = AccountSharedData::new(lamports, v1_14_11_size, &id()); + + // Deinitialize - should default to v1_14_11. + deinit_vote_account_state_v3_and_assert( + vote_pubkey, + vote_account, + rent.clone(), + ExpectedVoteStateVersion::V1_14_11, + ); + + // Create a vote account that's too small for v3, but has enough + // lamports for resize. + let v3_size = VoteStateV3::size_of(); + let lamports = rent.minimum_balance(v3_size); + let vote_account = AccountSharedData::new(lamports, v1_14_11_size, &id()); + + // Deinitialize - should resize and create v3. + deinit_vote_account_state_v3_and_assert( + vote_pubkey, + vote_account, + rent.clone(), + ExpectedVoteStateVersion::V3, + ); + + // Now create a vote account that's large enough for v3. + let lamports = rent.minimum_balance(v3_size); + let vote_account = AccountSharedData::new(lamports, v3_size, &id()); + + // Deinitialize - should create v3. + deinit_vote_account_state_v3_and_assert( + vote_pubkey, + vote_account, + rent, + ExpectedVoteStateVersion::V3, + ); + } +} diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 685f23d46f234a..702ae9cbdd1c33 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -1,7 +1,11 @@ //! Vote state, vote program //! 
Receive and processes votes from validators + +mod handler; + pub use solana_vote_interface::state::{vote_state_versions::*, *}; use { + handler::{VoteStateHandle, VoteStateHandler, VoteStateTargetVersion}, log::*, solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, solana_clock::{Clock, Epoch, Slot}, @@ -11,9 +15,7 @@ use { solana_pubkey::Pubkey, solana_rent::Rent, solana_slot_hashes::SlotHash, - solana_transaction_context::{ - BorrowedAccount, IndexOfAccount, InstructionContext, TransactionContext, - }, + solana_transaction_context::{BorrowedInstructionAccount, IndexOfAccount, InstructionContext}, solana_vote_interface::{error::VoteError, program::id}, std::{ cmp::Ordering, @@ -22,45 +24,20 @@ use { }; // utility function, used by Stakes, tests -pub fn from(account: &T) -> Option { - VoteState::deserialize(account.data()).ok() +pub fn from(account: &T) -> Option { + VoteStateV3::deserialize(account.data()).ok() } // utility function, used by Stakes, tests pub fn to(versioned: &VoteStateVersions, account: &mut T) -> Option<()> { - VoteState::serialize(versioned, account.data_as_mut_slice()).ok() -} - -// Updates the vote account state with a new VoteState instance. This is required temporarily during the -// upgrade of vote account state from V1_14_11 to Current. -fn set_vote_account_state( - vote_account: &mut BorrowedAccount, - vote_state: VoteState, -) -> Result<(), InstructionError> { - // If the account is not large enough to store the vote state, then attempt a realloc to make it large enough. - // The realloc can only proceed if the vote account has balance sufficient for rent exemption at the new size. 
- if (vote_account.get_data().len() < VoteStateVersions::vote_state_size_of(true)) - && (!vote_account - .is_rent_exempt_at_data_length(VoteStateVersions::vote_state_size_of(true)) - || vote_account - .set_data_length(VoteStateVersions::vote_state_size_of(true)) - .is_err()) - { - // Account cannot be resized to the size of a vote state as it will not be rent exempt, or failed to be - // resized for other reasons. So store the V1_14_11 version. - return vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( - VoteState1_14_11::from(vote_state), - ))); - } - // Vote account is large enough to store the newest version of vote state - vote_account.set_state(&VoteStateVersions::new_current(vote_state)) + VoteStateV3::serialize(versioned, account.data_as_mut_slice()).ok() } /// Checks the proposed vote state with the current and /// slot hashes, making adjustments to the root / filtering /// votes as needed. fn check_and_filter_proposed_vote_state( - vote_state: &VoteState, + vote_state: &VoteStateHandler, proposed_lockouts: &mut VecDeque, proposed_root: &mut Option, proposed_hash: Hash, @@ -76,7 +53,7 @@ fn check_and_filter_proposed_vote_state( .slot(); // If the proposed state is not new enough, return - if let Some(last_vote_slot) = vote_state.votes.back().map(|lockout| lockout.slot()) { + if let Some(last_vote_slot) = vote_state.votes().back().map(|lockout| lockout.slot()) { if last_proposed_slot <= last_vote_slot { return Err(VoteError::VoteTooOld); } @@ -102,10 +79,10 @@ fn check_and_filter_proposed_vote_state( // votes from the vote state are less than R, use its root instead. 
if root < earliest_slot_hash_in_history { // First overwrite the proposed root with the vote state's root - *proposed_root = vote_state.root_slot; + *proposed_root = vote_state.root_slot(); // Then try to find the latest vote in vote state that's less than R - for vote in vote_state.votes.iter().rev() { + for vote in vote_state.votes().iter().rev() { if vote.slot() <= root { *proposed_root = Some(vote.slot()); break; @@ -268,7 +245,7 @@ fn check_and_filter_proposed_vote_state( // fork warn!( "{} dropped vote {:?} root {:?} failed to match hash {} {}", - vote_state.node_pubkey, + vote_state.node_pubkey(), proposed_lockouts, proposed_root, proposed_hash, @@ -303,8 +280,8 @@ fn check_and_filter_proposed_vote_state( Ok(()) } -fn check_slots_are_valid( - vote_state: &VoteState, +fn check_slots_are_valid( + vote_state: &T, vote_slots: &[Slot], vote_hash: &Hash, slot_hashes: &[(Slot, Hash)], @@ -363,7 +340,10 @@ fn check_slots_are_valid( // there are not slots in `vote_slots` greater than `last_voted_slot` debug!( "{} dropped vote slots {:?}, vote hash: {:?} slot hashes:SlotHash {:?}, too old ", - vote_state.node_pubkey, vote_slots, vote_hash, slot_hashes + vote_state.node_pubkey(), + vote_slots, + vote_hash, + slot_hashes ); return Err(VoteError::VoteTooOld); } @@ -372,7 +352,9 @@ fn check_slots_are_valid( // a matching slot hash in step 2) info!( "{} dropped vote slots {:?} failed to match slot hashes: {:?}", - vote_state.node_pubkey, vote_slots, slot_hashes, + vote_state.node_pubkey(), + vote_slots, + slot_hashes, ); return Err(VoteError::SlotsMismatch); } @@ -382,7 +364,10 @@ fn check_slots_are_valid( // fork warn!( "{} dropped vote slots {:?} failed to match hash {} {}", - vote_state.node_pubkey, vote_slots, vote_hash, slot_hashes[j].1 + vote_state.node_pubkey(), + vote_slots, + vote_hash, + slot_hashes[j].1 ); return Err(VoteError::SlotHashMismatch); } @@ -427,7 +412,7 @@ fn check_slots_are_valid( // have to have at least one other slot on top of it, even if the 
first 30 votes were all // popped off. pub fn process_new_vote_state( - vote_state: &mut VoteState, + vote_state: &mut VoteStateHandler, mut new_state: VecDeque, new_root: Option, timestamp: Option, @@ -439,7 +424,7 @@ pub fn process_new_vote_state( return Err(VoteError::TooManyVotes); } - match (new_root, vote_state.root_slot) { + match (new_root, vote_state.root_slot()) { (Some(new_root), Some(current_root)) => { if new_root < current_root { return Err(VoteError::RootRollBack); @@ -496,7 +481,7 @@ pub fn process_new_vote_state( let mut earned_credits = 0_u64; if let Some(new_root) = new_root { - for current_vote in &vote_state.votes { + for current_vote in vote_state.votes() { // Find the first vote in the current vote state for a slot greater // than the new proposed root if current_vote.slot() <= new_root { @@ -533,10 +518,10 @@ pub fn process_new_vote_state( // All the votes in our current vote state that are missing from the new vote state // must have been expired by later votes. Check that the lockouts match this assumption. 
- while current_vote_state_index < vote_state.votes.len() + while current_vote_state_index < vote_state.votes().len() && new_vote_state_index < new_state.len() { - let current_vote = &vote_state.votes[current_vote_state_index]; + let current_vote = &vote_state.votes()[current_vote_state_index]; let new_vote = &mut new_state[new_vote_state_index]; // If the current slot is less than the new proposed slot, then the @@ -560,7 +545,7 @@ pub fn process_new_vote_state( } // Copy the vote slot latency in from the current state to the new state - new_vote.latency = vote_state.votes[current_vote_state_index].latency; + new_vote.latency = vote_state.votes()[current_vote_state_index].latency; current_vote_state_index = current_vote_state_index.checked_add(1).expect( "`current_vote_state_index` is bounded by `MAX_LOCKOUT_HISTORY` when slot is \ @@ -587,11 +572,11 @@ pub fn process_new_vote_state( // have had their latency initialized to 0 by the above loop. Those will now be updated to their actual latency. for new_vote in new_state.iter_mut() { if new_vote.latency == 0 { - new_vote.latency = VoteState::compute_vote_latency(new_vote.slot(), current_slot); + new_vote.latency = handler::compute_vote_latency(new_vote.slot(), current_slot); } } - if vote_state.root_slot != new_root { + if vote_state.root_slot() != new_root { // Award vote credits based on the number of slots that were voted on and have reached finality // For each finalized slot, there was one voted-on slot in the new vote state that was responsible for // finalizing it. Each of those votes is awarded 1 credit. 
@@ -601,14 +586,14 @@ pub fn process_new_vote_state( let last_slot = new_state.back().unwrap().slot(); vote_state.process_timestamp(last_slot, timestamp)?; } - vote_state.root_slot = new_root; - vote_state.votes = new_state; + vote_state.set_root_slot(new_root); + vote_state.set_votes(new_state); Ok(()) } -pub fn process_vote_unfiltered( - vote_state: &mut VoteState, +pub fn process_vote_unfiltered( + vote_state: &mut T, vote_slots: &[Slot], vote: &Vote, slot_hashes: &[SlotHash], @@ -623,7 +608,7 @@ pub fn process_vote_unfiltered( } pub fn process_vote( - vote_state: &mut VoteState, + vote_state: &mut VoteStateHandler, vote: &Vote, slot_hashes: &[SlotHash], epoch: Epoch, @@ -653,7 +638,10 @@ pub fn process_vote( } /// "unchecked" functions used by tests and Tower -pub fn process_vote_unchecked(vote_state: &mut VoteState, vote: Vote) -> Result<(), VoteError> { +pub fn process_vote_unchecked( + vote_state: &mut T, + vote: Vote, +) -> Result<(), VoteError> { if vote.slots.is_empty() { return Err(VoteError::EmptySlots); } @@ -669,13 +657,13 @@ pub fn process_vote_unchecked(vote_state: &mut VoteState, vote: Vote) -> Result< } #[cfg(test)] -pub fn process_slot_votes_unchecked(vote_state: &mut VoteState, slots: &[Slot]) { +pub fn process_slot_votes_unchecked(vote_state: &mut T, slots: &[Slot]) { for slot in slots { process_slot_vote_unchecked(vote_state, *slot); } } -pub fn process_slot_vote_unchecked(vote_state: &mut VoteState, slot: Slot) { +pub fn process_slot_vote_unchecked(vote_state: &mut T, slot: Slot) { let _ = process_vote_unchecked(vote_state, Vote::new(vec![slot], Hash::default())); } @@ -683,20 +671,19 @@ pub fn process_slot_vote_unchecked(vote_state: &mut VoteState, slot: Slot) { /// but will implicitly withdraw authorization from the previously authorized /// key pub fn authorize( - vote_account: &mut BorrowedAccount, + vote_account: &mut BorrowedInstructionAccount, authorized: &Pubkey, vote_authorize: VoteAuthorize, signers: &HashSet, clock: &Clock, ) -> 
Result<(), InstructionError> { - let mut vote_state: VoteState = vote_account - .get_state::()? - .convert_to_current(); + let mut vote_state = + VoteStateHandler::deserialize_and_convert(vote_account, VoteStateTargetVersion::V3)?; match vote_authorize { VoteAuthorize::Voter => { let authorized_withdrawer_signer = - verify_authorized_signer(&vote_state.authorized_withdrawer, signers).is_ok(); + verify_authorized_signer(vote_state.authorized_withdrawer(), signers).is_ok(); vote_state.set_new_authorized_voter( authorized, @@ -717,48 +704,46 @@ pub fn authorize( } VoteAuthorize::Withdrawer => { // current authorized withdrawer must say "yay" - verify_authorized_signer(&vote_state.authorized_withdrawer, signers)?; - vote_state.authorized_withdrawer = *authorized; + verify_authorized_signer(vote_state.authorized_withdrawer(), signers)?; + vote_state.set_authorized_withdrawer(*authorized); } } - set_vote_account_state(vote_account, vote_state) + vote_state.set_vote_account_state(vote_account) } /// Update the node_pubkey, requires signature of the authorized voter pub fn update_validator_identity( - vote_account: &mut BorrowedAccount, + vote_account: &mut BorrowedInstructionAccount, node_pubkey: &Pubkey, signers: &HashSet, ) -> Result<(), InstructionError> { - let mut vote_state: VoteState = vote_account - .get_state::()? 
- .convert_to_current(); + let mut vote_state = + VoteStateHandler::deserialize_and_convert(vote_account, VoteStateTargetVersion::V3)?; // current authorized withdrawer must say "yay" - verify_authorized_signer(&vote_state.authorized_withdrawer, signers)?; + verify_authorized_signer(vote_state.authorized_withdrawer(), signers)?; // new node must say "yay" verify_authorized_signer(node_pubkey, signers)?; - vote_state.node_pubkey = *node_pubkey; + vote_state.set_node_pubkey(*node_pubkey); - set_vote_account_state(vote_account, vote_state) + vote_state.set_vote_account_state(vote_account) } /// Update the vote account's commission pub fn update_commission( - vote_account: &mut BorrowedAccount, + vote_account: &mut BorrowedInstructionAccount, commission: u8, signers: &HashSet, epoch_schedule: &EpochSchedule, clock: &Clock, ) -> Result<(), InstructionError> { - let vote_state_result = vote_account - .get_state::() - .map(|vote_state| vote_state.convert_to_current()); + let vote_state_result = + VoteStateHandler::deserialize_and_convert(vote_account, VoteStateTargetVersion::V3); let enforce_commission_update_rule = if let Ok(decoded_vote_state) = &vote_state_result { - is_commission_increase(decoded_vote_state, commission) + commission > decoded_vote_state.commission() } else { true }; @@ -770,15 +755,15 @@ pub fn update_commission( let mut vote_state = vote_state_result?; // current authorized withdrawer must say "yay" - verify_authorized_signer(&vote_state.authorized_withdrawer, signers)?; + verify_authorized_signer(vote_state.authorized_withdrawer(), signers)?; - vote_state.commission = commission; + vote_state.set_commission(commission); - set_vote_account_state(vote_account, vote_state) + vote_state.set_vote_account_state(vote_account) } /// Given a proposed new commission, returns true if this would be a commission increase, false otherwise -pub fn is_commission_increase(vote_state: &VoteState, commission: u8) -> bool { +pub fn is_commission_increase(vote_state: 
&VoteStateV3, commission: u8) -> bool { commission > vote_state.commission } @@ -811,7 +796,6 @@ fn verify_authorized_signer( /// Withdraw funds from the vote account pub fn withdraw( - transaction_context: &TransactionContext, instruction_context: &InstructionContext, vote_account_index: IndexOfAccount, lamports: u64, @@ -820,13 +804,12 @@ pub fn withdraw( rent_sysvar: &Rent, clock: &Clock, ) -> Result<(), InstructionError> { - let mut vote_account = instruction_context - .try_borrow_instruction_account(transaction_context, vote_account_index)?; - let vote_state: VoteState = vote_account - .get_state::()? - .convert_to_current(); + let mut vote_account = + instruction_context.try_borrow_instruction_account(vote_account_index)?; + let vote_state = + VoteStateHandler::deserialize_and_convert(&vote_account, VoteStateTargetVersion::V3)?; - verify_authorized_signer(&vote_state.authorized_withdrawer, signers)?; + verify_authorized_signer(vote_state.authorized_withdrawer(), signers)?; let remaining_balance = vote_account .get_lamports() @@ -835,7 +818,7 @@ pub fn withdraw( if remaining_balance == 0 { let reject_active_vote_account_close = vote_state - .epoch_credits + .epoch_credits() .last() .map(|(last_epoch_with_credits, _, _)| { let current_epoch = clock.epoch; @@ -850,7 +833,10 @@ pub fn withdraw( return Err(VoteError::ActiveVoteAccountClose.into()); } else { // Deinitialize upon zero-balance - set_vote_account_state(&mut vote_account, VoteState::default())?; + VoteStateHandler::deinitialize_vote_account_state( + &mut vote_account, + VoteStateTargetVersion::V3, + )?; } } else { let min_rent_exempt_balance = rent_sysvar.minimum_balance(vote_account.get_data().len()); @@ -861,8 +847,7 @@ pub fn withdraw( vote_account.checked_sub_lamports(lamports)?; drop(vote_account); - let mut to_account = instruction_context - .try_borrow_instruction_account(transaction_context, to_account_index)?; + let mut to_account = 
instruction_context.try_borrow_instruction_account(to_account_index)?; to_account.checked_add_lamports(lamports)?; Ok(()) } @@ -871,14 +856,12 @@ pub fn withdraw( /// Assumes that the account is being init as part of a account creation or balance transfer and /// that the transaction must be signed by the staker's keys pub fn initialize_account( - vote_account: &mut BorrowedAccount, + vote_account: &mut BorrowedInstructionAccount, vote_init: &VoteInit, signers: &HashSet, clock: &Clock, ) -> Result<(), InstructionError> { - if vote_account.get_data().len() != VoteStateVersions::vote_state_size_of(true) { - return Err(InstructionError::InvalidAccountData); - } + VoteStateHandler::check_vote_account_length(vote_account, VoteStateTargetVersion::V3)?; let versioned = vote_account.get_state::()?; if !versioned.is_uninitialized() { @@ -888,21 +871,26 @@ pub fn initialize_account( // node must agree to accept this vote account verify_authorized_signer(&vote_init.node_pubkey, signers)?; - set_vote_account_state(vote_account, VoteState::new(vote_init, clock)) + VoteStateHandler::init_vote_account_state( + vote_account, + vote_init, + clock, + VoteStateTargetVersion::V3, + ) } -fn verify_and_get_vote_state( - vote_account: &BorrowedAccount, +fn verify_and_get_vote_state_handler( + vote_account: &BorrowedInstructionAccount, clock: &Clock, signers: &HashSet, -) -> Result { - let versioned = vote_account.get_state::()?; +) -> Result { + let mut vote_state = + VoteStateHandler::deserialize_and_convert(vote_account, VoteStateTargetVersion::V3)?; - if versioned.is_uninitialized() { + if vote_state.is_uninitialized() { return Err(InstructionError::UninitializedAccount); } - let mut vote_state = versioned.convert_to_current(); let authorized_voter = vote_state.get_and_update_authorized_voter(clock.epoch)?; verify_authorized_signer(&authorized_voter, signers)?; @@ -910,13 +898,13 @@ fn verify_and_get_vote_state( } pub fn process_vote_with_account( - vote_account: &mut BorrowedAccount, 
+ vote_account: &mut BorrowedInstructionAccount, slot_hashes: &[SlotHash], clock: &Clock, vote: &Vote, signers: &HashSet, ) -> Result<(), InstructionError> { - let mut vote_state = verify_and_get_vote_state(vote_account, clock, signers)?; + let mut vote_state = verify_and_get_vote_state_handler(vote_account, clock, signers)?; process_vote(&mut vote_state, vote, slot_hashes, clock.epoch, clock.slot)?; if let Some(timestamp) = vote.timestamp { @@ -926,17 +914,17 @@ pub fn process_vote_with_account( .ok_or(VoteError::EmptySlots) .and_then(|slot| vote_state.process_timestamp(*slot, timestamp))?; } - set_vote_account_state(vote_account, vote_state) + vote_state.set_vote_account_state(vote_account) } pub fn process_vote_state_update( - vote_account: &mut BorrowedAccount, + vote_account: &mut BorrowedInstructionAccount, slot_hashes: &[SlotHash], clock: &Clock, vote_state_update: VoteStateUpdate, signers: &HashSet, ) -> Result<(), InstructionError> { - let mut vote_state = verify_and_get_vote_state(vote_account, clock, signers)?; + let mut vote_state = verify_and_get_vote_state_handler(vote_account, clock, signers)?; do_process_vote_state_update( &mut vote_state, slot_hashes, @@ -944,11 +932,11 @@ pub fn process_vote_state_update( clock.slot, vote_state_update, )?; - set_vote_account_state(vote_account, vote_state) + vote_state.set_vote_account_state(vote_account) } pub fn do_process_vote_state_update( - vote_state: &mut VoteState, + vote_state: &mut VoteStateHandler, slot_hashes: &[SlotHash], epoch: u64, slot: u64, @@ -976,13 +964,13 @@ pub fn do_process_vote_state_update( } pub fn process_tower_sync( - vote_account: &mut BorrowedAccount, + vote_account: &mut BorrowedInstructionAccount, slot_hashes: &[SlotHash], clock: &Clock, tower_sync: TowerSync, signers: &HashSet, ) -> Result<(), InstructionError> { - let mut vote_state = verify_and_get_vote_state(vote_account, clock, signers)?; + let mut vote_state = verify_and_get_vote_state_handler(vote_account, clock, signers)?; 
do_process_tower_sync( &mut vote_state, slot_hashes, @@ -990,11 +978,11 @@ pub fn process_tower_sync( clock.slot, tower_sync, )?; - set_vote_account_state(vote_account, vote_state) + vote_state.set_vote_account_state(vote_account) } fn do_process_tower_sync( - vote_state: &mut VoteState, + vote_state: &mut VoteStateHandler, slot_hashes: &[SlotHash], epoch: u64, slot: u64, @@ -1032,9 +1020,9 @@ pub fn create_account_with_authorized( commission: u8, lamports: u64, ) -> AccountSharedData { - let mut vote_account = AccountSharedData::new(lamports, VoteState::size_of(), &id()); + let mut vote_account = AccountSharedData::new(lamports, VoteStateV3::size_of(), &id()); - let vote_state = VoteState::new( + let vote_state = VoteStateV3::new( &VoteInit { node_pubkey: *node_pubkey, authorized_voter: *authorized_voter, @@ -1044,8 +1032,35 @@ pub fn create_account_with_authorized( &Clock::default(), ); - VoteState::serialize( - &VoteStateVersions::Current(Box::new(vote_state)), + VoteStateV3::serialize( + &VoteStateVersions::V3(Box::new(vote_state)), + vote_account.data_as_mut_slice(), + ) + .unwrap(); + + vote_account +} + +pub fn create_v4_account_with_authorized( + node_pubkey: &Pubkey, + authorized_voter: &Pubkey, + authorized_withdrawer: &Pubkey, + bls_pubkey_compressed: Option<[u8; BLS_PUBLIC_KEY_COMPRESSED_SIZE]>, + inflation_rewards_commission_bps: u16, + lamports: u64, +) -> AccountSharedData { + let mut vote_account = AccountSharedData::new(lamports, VoteStateV4::size_of(), &id()); + + let vote_state = handler::create_new_vote_state_v4_for_tests( + node_pubkey, + authorized_voter, + authorized_withdrawer, + bls_pubkey_compressed, + inflation_rewards_commission_bps, + ); + + VoteStateV4::serialize( + &VoteStateVersions::V4(Box::new(vote_state)), vote_account.data_as_mut_slice(), ) .unwrap(); @@ -1069,18 +1084,19 @@ mod tests { super::*, crate::vote_state, assert_matches::assert_matches, - solana_account::{state_traits::StateMut, AccountSharedData}, + 
solana_account::AccountSharedData, solana_clock::DEFAULT_SLOTS_PER_EPOCH, solana_sha256_hasher::hash, - solana_transaction_context::InstructionAccount, + solana_transaction_context::{InstructionAccount, TransactionContext}, + solana_vote_interface::authorized_voters::AuthorizedVoters, std::cell::RefCell, test_case::test_case, }; const MAX_RECENT_VOTES: usize = 16; - fn vote_state_new_for_test(auth_pubkey: &Pubkey) -> VoteState { - VoteState::new( + fn vote_state_new_for_test(auth_pubkey: &Pubkey) -> VoteStateHandler { + VoteStateHandler::new_v3(VoteStateV3::new( &VoteInit { node_pubkey: solana_pubkey::new_rand(), authorized_voter: *auth_pubkey, @@ -1088,12 +1104,12 @@ mod tests { commission: 0, }, &Clock::default(), - ) + )) } fn create_test_account() -> (Pubkey, RefCell) { let rent = Rent::default(); - let balance = VoteState::get_rent_exempt_reserve(&rent); + let balance = rent.minimum_balance(VoteStateV3::size_of()); let vote_pubkey = solana_pubkey::new_rand(); ( vote_pubkey, @@ -1106,13 +1122,21 @@ mod tests { ) } + fn get_credits(epoch_credits: &[(Epoch, u64, u64)]) -> u64 { + if epoch_credits.is_empty() { + 0 + } else { + epoch_credits.last().unwrap().1 + } + } + #[test] fn test_vote_state_upgrade_from_1_14_11() { // Create an initial vote account that is sized for the 1_14_11 version of vote state, and has only the // required lamports for rent exempt minimum at that size let node_pubkey = solana_pubkey::new_rand(); let withdrawer_pubkey = solana_pubkey::new_rand(); - let mut vote_state = VoteState::new( + let mut vote_state = VoteStateV3::new( &VoteInit { node_pubkey, authorized_voter: withdrawer_pubkey, @@ -1160,23 +1184,25 @@ mod tests { // Create a fake TransactionContext with a fake InstructionContext with a single account which is the // vote account that was just created let processor_account = AccountSharedData::new(0, 0, &solana_sdk_ids::native_loader::id()); - let transaction_context = TransactionContext::new( + let mut transaction_context = 
TransactionContext::new( vec![(id(), processor_account), (node_pubkey, vote_account)], rent.clone(), 0, 0, ); - let mut instruction_context = InstructionContext::default(); - instruction_context.configure( - vec![0], - vec![InstructionAccount::new(1, 0, false, true)], - &[], - ); + transaction_context + .configure_next_instruction_for_tests( + 0, + vec![InstructionAccount::new(1, false, true)], + &[], + ) + .unwrap(); + let instruction_context = transaction_context.get_next_instruction_context().unwrap(); // Get the BorrowedAccount from the InstructionContext which is what is used to manipulate and inspect account // state let mut borrowed_account = instruction_context - .try_borrow_instruction_account(&transaction_context, 0) + .try_borrow_instruction_account(0) .unwrap(); // Ensure that the vote state started out at 1_14_11 @@ -1184,7 +1210,7 @@ mod tests { assert_matches!(vote_state_version, VoteStateVersions::V1_14_11(_)); // Convert the vote state to current as would occur during vote instructions - let converted_vote_state = vote_state_version.convert_to_current(); + let converted_vote_state = VoteStateV3::deserialize(borrowed_account.get_data()).unwrap(); // Check to make sure that the vote_state is unchanged assert!(vote_state == converted_vote_state); @@ -1194,14 +1220,15 @@ mod tests { // Now re-set the vote account state; because the feature is not enabled, the old 1_14_11 format should be // written out assert_eq!( - set_vote_account_state(&mut borrowed_account, vote_state.clone()), + VoteStateHandler::new_v3(vote_state.clone()) + .set_vote_account_state(&mut borrowed_account), Ok(()) ); let vote_state_version = borrowed_account.get_state::().unwrap(); assert_matches!(vote_state_version, VoteStateVersions::V1_14_11(_)); // Convert the vote state to current as would occur during vote instructions - let converted_vote_state = vote_state_version.convert_to_current(); + let converted_vote_state = 
VoteStateV3::deserialize(borrowed_account.get_data()).unwrap(); // Check to make sure that the vote_state is unchanged assert_eq!(vote_state, converted_vote_state); @@ -1211,14 +1238,15 @@ mod tests { // Test that if the vote account does not have sufficient lamports to realloc, // the old vote state is written out assert_eq!( - set_vote_account_state(&mut borrowed_account, vote_state.clone()), + VoteStateHandler::new_v3(vote_state.clone()) + .set_vote_account_state(&mut borrowed_account), Ok(()) ); let vote_state_version = borrowed_account.get_state::().unwrap(); assert_matches!(vote_state_version, VoteStateVersions::V1_14_11(_)); // Convert the vote state to current as would occur during vote instructions - let converted_vote_state = vote_state_version.convert_to_current(); + let converted_vote_state = VoteStateV3::deserialize(borrowed_account.get_data()).unwrap(); // Check to make sure that the vote_state is unchanged assert_eq!(vote_state, converted_vote_state); @@ -1228,18 +1256,19 @@ mod tests { // Test that when the feature is enabled, if the vote account does have sufficient lamports, the // new vote state is written out assert_eq!( - borrowed_account.set_lamports(rent.minimum_balance(VoteState::size_of()),), + borrowed_account.set_lamports(rent.minimum_balance(VoteStateV3::size_of())), Ok(()) ); assert_eq!( - set_vote_account_state(&mut borrowed_account, vote_state.clone()), + VoteStateHandler::new_v3(vote_state.clone()) + .set_vote_account_state(&mut borrowed_account), Ok(()) ); let vote_state_version = borrowed_account.get_state::().unwrap(); - assert_matches!(vote_state_version, VoteStateVersions::Current(_)); + assert_matches!(vote_state_version, VoteStateVersions::V3(_)); // Convert the vote state to current as would occur during vote instructions - let converted_vote_state = vote_state_version.convert_to_current(); + let converted_vote_state = VoteStateV3::deserialize(borrowed_account.get_data()).unwrap(); // Check to make sure that the vote_state is 
unchanged assert_eq!(vote_state, converted_vote_state); @@ -1249,38 +1278,36 @@ mod tests { fn test_vote_lockout() { let (_vote_pubkey, vote_account) = create_test_account(); - let mut vote_state: VoteState = - StateMut::::state(&*vote_account.borrow()) - .unwrap() - .convert_to_current(); + let vote_state_v3 = VoteStateV3::deserialize(vote_account.borrow().data()).unwrap(); + let mut vote_state = VoteStateHandler::new_v3(vote_state_v3); for i in 0..(MAX_LOCKOUT_HISTORY + 1) { process_slot_vote_unchecked(&mut vote_state, (INITIAL_LOCKOUT * i) as u64); } // The last vote should have been popped b/c it reached a depth of MAX_LOCKOUT_HISTORY - assert_eq!(vote_state.votes.len(), MAX_LOCKOUT_HISTORY); - assert_eq!(vote_state.root_slot, Some(0)); + assert_eq!(vote_state.votes().len(), MAX_LOCKOUT_HISTORY); + assert_eq!(vote_state.root_slot(), Some(0)); check_lockouts(&vote_state); // One more vote that confirms the entire stack, // the root_slot should change to the // second vote - let top_vote = vote_state.votes.front().unwrap().slot(); + let top_vote = vote_state.votes().front().unwrap().slot(); let slot = vote_state.last_lockout().unwrap().last_locked_out_slot(); process_slot_vote_unchecked(&mut vote_state, slot); - assert_eq!(Some(top_vote), vote_state.root_slot); + assert_eq!(Some(top_vote), vote_state.root_slot()); // Expire everything except the first vote let slot = vote_state - .votes + .votes() .front() .unwrap() .lockout .last_locked_out_slot(); process_slot_vote_unchecked(&mut vote_state, slot); // First vote and new vote are both stored for a total of 2 votes - assert_eq!(vote_state.votes.len(), 2); + assert_eq!(vote_state.votes().len(), 2); } #[test] @@ -1288,7 +1315,7 @@ mod tests { let node_pubkey = Pubkey::new_unique(); let withdrawer_pubkey = Pubkey::new_unique(); let clock = Clock::default(); - let vote_state = VoteState::new( + let vote_state = VoteStateV3::new( &VoteInit { node_pubkey, authorized_voter: withdrawer_pubkey, @@ -1299,7 +1326,7 @@ mod 
tests { ); let serialized = - bincode::serialize(&VoteStateVersions::Current(Box::new(vote_state.clone()))).unwrap(); + bincode::serialize(&VoteStateVersions::V3(Box::new(vote_state.clone()))).unwrap(); let serialized_len = serialized.len(); let rent = Rent::default(); let lamports = rent.minimum_balance(serialized_len); @@ -1309,23 +1336,25 @@ mod tests { // Create a fake TransactionContext with a fake InstructionContext with a single account which is the // vote account that was just created let processor_account = AccountSharedData::new(0, 0, &solana_sdk_ids::native_loader::id()); - let transaction_context = TransactionContext::new( + let mut transaction_context = TransactionContext::new( vec![(id(), processor_account), (node_pubkey, vote_account)], rent, 0, 0, ); - let mut instruction_context = InstructionContext::default(); - instruction_context.configure( - vec![0], - vec![InstructionAccount::new(1, 0, false, true)], - &[], - ); + transaction_context + .configure_next_instruction_for_tests( + 0, + vec![InstructionAccount::new(1, false, true)], + &[], + ) + .unwrap(); + let instruction_context = transaction_context.get_next_instruction_context().unwrap(); // Get the BorrowedAccount from the InstructionContext which is what is used to manipulate and inspect account // state let mut borrowed_account = instruction_context - .try_borrow_instruction_account(&transaction_context, 0) + .try_borrow_instruction_account(0) .unwrap(); let epoch_schedule = std::sync::Arc::new(EpochSchedule::without_warmup()); @@ -1344,10 +1373,8 @@ mod tests { // Increase commission in first half of epoch -- allowed assert_eq!( - borrowed_account - .get_state::() + VoteStateV3::deserialize(borrowed_account.get_data()) .unwrap() - .convert_to_current() .commission, 10 ); @@ -1362,10 +1389,8 @@ mod tests { Ok(()) ); assert_eq!( - borrowed_account - .get_state::() + VoteStateV3::deserialize(borrowed_account.get_data()) .unwrap() - .convert_to_current() .commission, 11 ); @@ -1382,10 +1407,8 
@@ mod tests { Err(_) ); assert_eq!( - borrowed_account - .get_state::() + VoteStateV3::deserialize(borrowed_account.get_data()) .unwrap() - .convert_to_current() .commission, 11 ); @@ -1402,19 +1425,15 @@ mod tests { Ok(()) ); assert_eq!( - borrowed_account - .get_state::() + VoteStateV3::deserialize(borrowed_account.get_data()) .unwrap() - .convert_to_current() .commission, 10 ); assert_eq!( - borrowed_account - .get_state::() + VoteStateV3::deserialize(borrowed_account.get_data()) .unwrap() - .convert_to_current() .commission, 10 ); @@ -1430,10 +1449,8 @@ mod tests { Ok(()) ); assert_eq!( - borrowed_account - .get_state::() + VoteStateV3::deserialize(borrowed_account.get_data()) .unwrap() - .convert_to_current() .commission, 9 ); @@ -1476,26 +1493,27 @@ mod tests { process_slot_vote_unchecked(&mut vote_state, i as u64); } - assert_eq!(vote_state.votes[0].confirmation_count(), 3); + assert_eq!(vote_state.votes()[0].confirmation_count(), 3); // Expire the second and third votes - let expire_slot = vote_state.votes[1].slot() + vote_state.votes[1].lockout.lockout() + 1; + let expire_slot = + vote_state.votes()[1].slot() + vote_state.votes()[1].lockout.lockout() + 1; process_slot_vote_unchecked(&mut vote_state, expire_slot); - assert_eq!(vote_state.votes.len(), 2); + assert_eq!(vote_state.votes().len(), 2); // Check that the old votes expired - assert_eq!(vote_state.votes[0].slot(), 0); - assert_eq!(vote_state.votes[1].slot(), expire_slot); + assert_eq!(vote_state.votes()[0].slot(), 0); + assert_eq!(vote_state.votes()[1].slot(), expire_slot); // Process one more vote process_slot_vote_unchecked(&mut vote_state, expire_slot + 1); // Confirmation count for the older first vote should remain unchanged - assert_eq!(vote_state.votes[0].confirmation_count(), 3); + assert_eq!(vote_state.votes()[0].confirmation_count(), 3); // The later votes should still have increasing confirmation counts - assert_eq!(vote_state.votes[1].confirmation_count(), 2); - 
assert_eq!(vote_state.votes[2].confirmation_count(), 1); + assert_eq!(vote_state.votes()[1].confirmation_count(), 2); + assert_eq!(vote_state.votes()[2].confirmation_count(), 1); } #[test] @@ -1507,14 +1525,14 @@ mod tests { process_slot_vote_unchecked(&mut vote_state, i as u64); } - assert_eq!(vote_state.credits(), 0); + assert_eq!(get_credits(vote_state.epoch_credits()), 0); process_slot_vote_unchecked(&mut vote_state, MAX_LOCKOUT_HISTORY as u64 + 1); - assert_eq!(vote_state.credits(), 1); + assert_eq!(get_credits(vote_state.epoch_credits()), 1); process_slot_vote_unchecked(&mut vote_state, MAX_LOCKOUT_HISTORY as u64 + 2); - assert_eq!(vote_state.credits(), 2); + assert_eq!(get_credits(vote_state.epoch_credits()), 2); process_slot_vote_unchecked(&mut vote_state, MAX_LOCKOUT_HISTORY as u64 + 3); - assert_eq!(vote_state.credits(), 3); + assert_eq!(get_credits(vote_state.epoch_credits()), 3); } #[test] @@ -1545,13 +1563,13 @@ mod tests { assert!(vote_state.nth_recent_lockout(MAX_LOCKOUT_HISTORY).is_none()); } - fn check_lockouts(vote_state: &VoteState) { - for (i, vote) in vote_state.votes.iter().enumerate() { - let num_votes = vote_state - .votes + fn check_lockouts(vote_state: &VoteStateHandler) { + let votes = vote_state.votes(); + for (i, vote) in votes.iter().enumerate() { + let num_votes = votes .len() .checked_sub(i) - .expect("`i` is less than `vote_state.votes.len()`"); + .expect("`i` is less than `vote_state.votes().len()`"); assert_eq!( vote.lockout.lockout(), INITIAL_LOCKOUT.pow(num_votes as u32) as u64 @@ -1559,15 +1577,11 @@ mod tests { } } - fn recent_votes(vote_state: &VoteState) -> Vec { - let start = vote_state.votes.len().saturating_sub(MAX_RECENT_VOTES); - (start..vote_state.votes.len()) - .map(|i| { - Vote::new( - vec![vote_state.votes.get(i).unwrap().slot()], - Hash::default(), - ) - }) + fn recent_votes(vote_state: &VoteStateHandler) -> Vec { + let votes = vote_state.votes(); + let start = votes.len().saturating_sub(MAX_RECENT_VOTES); + 
(start..votes.len()) + .map(|i| Vote::new(vec![votes.get(i).unwrap().slot()], Hash::default())) .collect() } @@ -1601,7 +1615,7 @@ mod tests { #[test] fn test_process_vote_skips_old_vote() { - let mut vote_state = VoteState::default(); + let mut vote_state = VoteStateHandler::default_v3(); let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(0, vote.hash)]; @@ -1619,7 +1633,7 @@ mod tests { #[test] fn test_check_slots_are_valid_vote_empty_slot_hashes() { - let vote_state = VoteState::default(); + let vote_state = VoteStateHandler::default_v3(); let vote = Vote::new(vec![0], Hash::default()); assert_eq!( @@ -1630,7 +1644,7 @@ mod tests { #[test] fn test_check_slots_are_valid_new_vote() { - let vote_state = VoteState::default(); + let vote_state = VoteStateHandler::default_v3(); let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; @@ -1642,7 +1656,7 @@ mod tests { #[test] fn test_check_slots_are_valid_bad_hash() { - let vote_state = VoteState::default(); + let vote_state = VoteStateHandler::default_v3(); let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), hash(vote.hash.as_ref()))]; @@ -1654,7 +1668,7 @@ mod tests { #[test] fn test_check_slots_are_valid_bad_slot() { - let vote_state = VoteState::default(); + let vote_state = VoteStateHandler::default_v3(); let vote = Vote::new(vec![1], Hash::default()); let slot_hashes: Vec<_> = vec![(0, vote.hash)]; @@ -1666,7 +1680,7 @@ mod tests { #[test] fn test_check_slots_are_valid_duplicate_vote() { - let mut vote_state = VoteState::default(); + let mut vote_state = VoteStateHandler::default_v3(); let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; @@ -1682,7 +1696,7 @@ mod tests { #[test] fn test_check_slots_are_valid_next_vote() { - let mut vote_state = VoteState::default(); + let mut vote_state = 
VoteStateHandler::default_v3(); let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; @@ -1701,7 +1715,7 @@ mod tests { #[test] fn test_check_slots_are_valid_next_vote_only() { - let mut vote_state = VoteState::default(); + let mut vote_state = VoteStateHandler::default_v3(); let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; @@ -1719,7 +1733,7 @@ mod tests { } #[test] fn test_process_vote_empty_slots() { - let mut vote_state = VoteState::default(); + let mut vote_state = VoteStateHandler::default_v3(); let vote = Vote::new(vec![], Hash::default()); assert_eq!( @@ -1729,7 +1743,7 @@ mod tests { } pub fn process_new_vote_state_from_lockouts( - vote_state: &mut VoteState, + vote_state: &mut VoteStateHandler, new_state: VecDeque, new_root: Option, timestamp: Option, @@ -1748,8 +1762,9 @@ mod tests { // Test vote credit updates after "one credit per slot" feature is enabled #[test] fn test_vote_state_update_increment_credits() { - // Create a new Votestate - let mut vote_state = VoteState::new(&VoteInit::default(), &Clock::default()); + // Create a new VoteStateV3 handler + let mut vote_state = + VoteStateHandler::new_v3(VoteStateV3::new(&VoteInit::default(), &Clock::default())); // Test data: a sequence of groups of votes to simulate having been cast, after each group a vote // state update is compared to "normal" vote processing to ensure that credits are earned equally @@ -1807,8 +1822,8 @@ mod tests { assert_eq!( process_new_vote_state( &mut vote_state, - vote_state_after_vote.votes, - vote_state_after_vote.root_slot, + vote_state_after_vote.votes().clone(), + vote_state_after_vote.root_slot(), None, 0, 0, @@ -1818,8 +1833,8 @@ mod tests { // And ensure that the credits earned were the same assert_eq!( - vote_state.epoch_credits, - vote_state_after_vote.epoch_credits + vote_state.epoch_credits(), + 
vote_state_after_vote.epoch_credits() ); } } @@ -1994,10 +2009,12 @@ mod tests { // For each vote group, process all vote groups leading up to it and it itself, and ensure that the number of // credits earned is correct for both regular votes and vote state updates for i in 0..test_vote_groups.len() { - // Create a new VoteState for vote transaction - let mut vote_state_1 = VoteState::new(&VoteInit::default(), &Clock::default()); - // Create a new VoteState for vote state update transaction - let mut vote_state_2 = VoteState::new(&VoteInit::default(), &Clock::default()); + // Create a new VoteStateV3 for vote transaction + let mut vote_state_1 = + VoteStateHandler::new_v3(VoteStateV3::new(&VoteInit::default(), &Clock::default())); + // Create a new VoteStateV3 for vote state update transaction + let mut vote_state_2 = + VoteStateHandler::new_v3(VoteStateV3::new(&VoteInit::default(), &Clock::default())); test_vote_groups.iter().take(i + 1).for_each(|vote_group| { let vote = Vote { slots: vote_group.0.clone(), //vote_group.0 is the set of slots to cast votes on @@ -2020,8 +2037,8 @@ mod tests { assert_eq!( process_new_vote_state( &mut vote_state_2, - vote_state_1.votes.clone(), - vote_state_1.root_slot, + vote_state_1.votes().clone(), + vote_state_1.root_slot(), None, 0, vote_group.1, // vote_group.1 is the slot in which the vote was cast @@ -2032,8 +2049,14 @@ mod tests { // Ensure that the credits earned is correct for both vote states let vote_group = &test_vote_groups[i]; - assert_eq!(vote_state_1.credits(), vote_group.2 as u64); // vote_group.2 is the expected number of credits - assert_eq!(vote_state_2.credits(), vote_group.2 as u64); // vote_group.2 is the expected number of credits + assert_eq!( + get_credits(vote_state_1.epoch_credits()), + vote_group.2 as u64 + ); // vote_group.2 is the expected number of credits + assert_eq!( + get_credits(vote_state_2.epoch_credits()), + vote_group.2 as u64 + ); // vote_group.2 is the expected number of credits } } @@ 
-2120,7 +2143,8 @@ mod tests { ]; // Initial vote state - let mut vote_state = VoteState::new(&VoteInit::default(), &Clock::default()); + let mut vote_state = + VoteStateHandler::new_v3(VoteStateV3::new(&VoteInit::default(), &Clock::default())); // Process the vote state updates in sequence and ensure that the credits earned after each is processed is // correct @@ -2148,13 +2172,16 @@ mod tests { ); // Ensure that the credits earned is correct - assert_eq!(vote_state.credits(), proposed_vote_state.3 as u64); + assert_eq!( + get_credits(vote_state.epoch_credits()), + proposed_vote_state.3 as u64 + ); }); } #[test] fn test_process_new_vote_too_many_votes() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); let bad_votes: VecDeque = (0..=MAX_LOCKOUT_HISTORY) .map(|slot| { Lockout::new_with_confirmation_count( @@ -2179,11 +2206,11 @@ mod tests { #[test] fn test_process_new_vote_state_root_rollback() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); for i in 0..MAX_LOCKOUT_HISTORY + 2 { process_slot_vote_unchecked(&mut vote_state1, i as Slot); } - assert_eq!(vote_state1.root_slot.unwrap(), 1); + assert_eq!(vote_state1.root_slot().unwrap(), 1); // Update vote_state2 with a higher slot so that `process_new_vote_state` // doesn't panic. 
@@ -2197,7 +2224,7 @@ mod tests { assert_eq!( process_new_vote_state( &mut vote_state1, - vote_state2.votes.clone(), + vote_state2.votes().clone(), lesser_root, None, current_epoch, @@ -2211,7 +2238,7 @@ mod tests { assert_eq!( process_new_vote_state( &mut vote_state1, - vote_state2.votes.clone(), + vote_state2.votes().clone(), none_root, None, current_epoch, @@ -2223,7 +2250,7 @@ mod tests { #[test] fn test_process_new_vote_state_zero_confirmations() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); let current_epoch = vote_state1.current_epoch(); let bad_votes: VecDeque = vec![ @@ -2263,7 +2290,7 @@ mod tests { #[test] fn test_process_new_vote_state_confirmations_too_large() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); let current_epoch = vote_state1.current_epoch(); let good_votes: VecDeque = vec![Lockout::new_with_confirmation_count( @@ -2282,7 +2309,7 @@ mod tests { ) .unwrap(); - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); let bad_votes: VecDeque = vec![Lockout::new_with_confirmation_count( 0, MAX_LOCKOUT_HISTORY as u32 + 1, @@ -2303,7 +2330,7 @@ mod tests { #[test] fn test_process_new_vote_state_slot_smaller_than_root() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); let current_epoch = vote_state1.current_epoch(); let root_slot = 5; @@ -2344,7 +2371,7 @@ mod tests { #[test] fn test_process_new_vote_state_slots_not_ordered() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); let current_epoch = vote_state1.current_epoch(); let bad_votes: VecDeque = vec![ @@ -2384,7 +2411,7 @@ mod tests { #[test] fn test_process_new_vote_state_confirmations_not_ordered() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); let current_epoch = 
vote_state1.current_epoch(); let bad_votes: VecDeque = vec![ @@ -2424,7 +2451,7 @@ mod tests { #[test] fn test_process_new_vote_state_new_vote_state_lockout_mismatch() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); let current_epoch = vote_state1.current_epoch(); let bad_votes: VecDeque = vec![ @@ -2449,7 +2476,7 @@ mod tests { #[test] fn test_process_new_vote_state_confirmation_rollback() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); let current_epoch = vote_state1.current_epoch(); let votes: VecDeque = vec![ Lockout::new_with_confirmation_count(0, 4), @@ -2484,12 +2511,12 @@ mod tests { #[test] fn test_process_new_vote_state_root_progress() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); for i in 0..MAX_LOCKOUT_HISTORY { process_slot_vote_unchecked(&mut vote_state1, i as u64); } - assert!(vote_state1.root_slot.is_none()); + assert!(vote_state1.root_slot().is_none()); let mut vote_state2 = vote_state1.clone(); // 1) Try to update `vote_state1` with no root, @@ -2500,12 +2527,12 @@ mod tests { // should succeed. for new_vote in MAX_LOCKOUT_HISTORY + 1..=MAX_LOCKOUT_HISTORY + 2 { process_slot_vote_unchecked(&mut vote_state2, new_vote as Slot); - assert_ne!(vote_state1.root_slot, vote_state2.root_slot); + assert_ne!(vote_state1.root_slot(), vote_state2.root_slot()); process_new_vote_state( &mut vote_state1, - vote_state2.votes.clone(), - vote_state2.root_slot, + vote_state2.votes().clone(), + vote_state2.root_slot(), None, vote_state2.current_epoch(), 0, @@ -2536,11 +2563,11 @@ mod tests { // will immediately pop off 2. 
// Construct on-chain vote state - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); process_slot_votes_unchecked(&mut vote_state1, &[1, 2, 5]); assert_eq!( vote_state1 - .votes + .votes() .iter() .map(|vote| vote.slot()) .collect::>(), @@ -2548,11 +2575,11 @@ mod tests { ); // Construct local tower state - let mut vote_state2 = VoteState::default(); + let mut vote_state2 = VoteStateHandler::default_v3(); process_slot_votes_unchecked(&mut vote_state2, &[1, 2, 3, 5, 7]); assert_eq!( vote_state2 - .votes + .votes() .iter() .map(|vote| vote.slot()) .collect::>(), @@ -2562,8 +2589,8 @@ mod tests { // See that on-chain vote state can update properly process_new_vote_state( &mut vote_state1, - vote_state2.votes.clone(), - vote_state2.root_slot, + vote_state2.votes().clone(), + vote_state2.root_slot(), None, vote_state2.current_epoch(), 0, @@ -2576,11 +2603,11 @@ mod tests { #[test] fn test_process_new_vote_state_lockout_violation() { // Construct on-chain vote state - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); process_slot_votes_unchecked(&mut vote_state1, &[1, 2, 4, 5]); assert_eq!( vote_state1 - .votes + .votes() .iter() .map(|vote| vote.slot()) .collect::>(), @@ -2589,11 +2616,11 @@ mod tests { // Construct conflicting tower state. Vote 4 is missing, // but 5 should not have popped off vote 4. 
- let mut vote_state2 = VoteState::default(); + let mut vote_state2 = VoteStateHandler::default_v3(); process_slot_votes_unchecked(&mut vote_state2, &[1, 2, 3, 5, 7]); assert_eq!( vote_state2 - .votes + .votes() .iter() .map(|vote| vote.slot()) .collect::>(), @@ -2604,8 +2631,8 @@ mod tests { assert_eq!( process_new_vote_state( &mut vote_state1, - vote_state2.votes.clone(), - vote_state2.root_slot, + vote_state2.votes().clone(), + vote_state2.root_slot(), None, vote_state2.current_epoch(), 0, @@ -2617,11 +2644,11 @@ mod tests { #[test] fn test_process_new_vote_state_lockout_violation2() { // Construct on-chain vote state - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); process_slot_votes_unchecked(&mut vote_state1, &[1, 2, 5, 6, 7]); assert_eq!( vote_state1 - .votes + .votes() .iter() .map(|vote| vote.slot()) .collect::>(), @@ -2630,11 +2657,11 @@ mod tests { // Construct a new vote state. Violates on-chain state because 8 // should not have popped off 7 - let mut vote_state2 = VoteState::default(); + let mut vote_state2 = VoteStateHandler::default_v3(); process_slot_votes_unchecked(&mut vote_state2, &[1, 2, 3, 5, 6, 8]); assert_eq!( vote_state2 - .votes + .votes() .iter() .map(|vote| vote.slot()) .collect::>(), @@ -2646,8 +2673,8 @@ mod tests { assert_eq!( process_new_vote_state( &mut vote_state1, - vote_state2.votes.clone(), - vote_state2.root_slot, + vote_state2.votes().clone(), + vote_state2.root_slot(), None, vote_state2.current_epoch(), 0, @@ -2659,11 +2686,11 @@ mod tests { #[test] fn test_process_new_vote_state_expired_ancestor_not_removed() { // Construct on-chain vote state - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); process_slot_votes_unchecked(&mut vote_state1, &[1, 2, 3, 9]); assert_eq!( vote_state1 - .votes + .votes() .iter() .map(|vote| vote.slot()) .collect::>(), @@ -2677,11 +2704,11 @@ mod tests { // Slot 1 has been expired by 10, but is 
kept alive by its descendant // 9 which has not been expired yet. - assert_eq!(vote_state2.votes[0].slot(), 1); - assert_eq!(vote_state2.votes[0].lockout.last_locked_out_slot(), 9); + assert_eq!(vote_state2.votes()[0].slot(), 1); + assert_eq!(vote_state2.votes()[0].lockout.last_locked_out_slot(), 9); assert_eq!( vote_state2 - .votes + .votes() .iter() .map(|vote| vote.slot()) .collect::>(), @@ -2691,8 +2718,8 @@ mod tests { // Should be able to update vote_state1 process_new_vote_state( &mut vote_state1, - vote_state2.votes.clone(), - vote_state2.root_slot, + vote_state2.votes().clone(), + vote_state2.root_slot(), None, vote_state2.current_epoch(), 0, @@ -2703,11 +2730,11 @@ mod tests { #[test] fn test_process_new_vote_current_state_contains_bigger_slots() { - let mut vote_state1 = VoteState::default(); + let mut vote_state1 = VoteStateHandler::default_v3(); process_slot_votes_unchecked(&mut vote_state1, &[6, 7, 8]); assert_eq!( vote_state1 - .votes + .votes() .iter() .map(|vote| vote.slot()) .collect::>(), @@ -2753,12 +2780,12 @@ mod tests { 0, ) .unwrap(); - assert_eq!(vote_state1.votes, good_votes); + assert_eq!(*vote_state1.votes(), good_votes); } #[test] fn test_filter_old_votes() { - let mut vote_state = VoteState::default(); + let mut vote_state = VoteStateHandler::default_v3(); let old_vote_slot = 1; let vote = Vote::new(vec![old_vote_slot], Hash::default()); @@ -2783,8 +2810,8 @@ mod tests { process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0).unwrap(); assert_eq!( vote_state - .votes - .into_iter() + .votes() + .iter() .map(|vote| vote.lockout) .collect::>(), vec![Lockout::new_with_confirmation_count(vote_slot, 1)] @@ -2799,8 +2826,8 @@ mod tests { .collect() } - fn build_vote_state(vote_slots: Vec, slot_hashes: &[(Slot, Hash)]) -> VoteState { - let mut vote_state = VoteState::default(); + fn build_vote_state(vote_slots: Vec, slot_hashes: &[(Slot, Hash)]) -> VoteStateHandler { + let mut vote_state = VoteStateHandler::default_v3(); if 
!vote_slots.is_empty() { let vote_hash = slot_hashes @@ -2915,7 +2942,7 @@ mod tests { ); let mut vote_state = build_vote_state(current_vote_state_slots, &slot_hashes); - vote_state.root_slot = current_vote_state_root; + vote_state.set_root_slot(current_vote_state_root); slot_hashes.retain(|slot| slot.0 >= earliest_slot_in_history); assert!(!proposed_slots_and_lockouts.is_empty()); @@ -2946,11 +2973,11 @@ mod tests { assert!( do_process_tower_sync(&mut vote_state, &slot_hashes, 0, 0, tower_sync.clone(),).is_ok() ); - assert_eq!(vote_state.root_slot, expected_root); + assert_eq!(vote_state.root_slot(), expected_root); assert_eq!( vote_state - .votes - .into_iter() + .votes() + .iter() .map(|vote| vote.lockout) .collect::>(), expected_vote_state, @@ -3288,7 +3315,7 @@ mod tests { // Have to vote for a slot greater than the last vote in the vote state to avoid VoteTooOld // errors - let vote_slot = vote_state.votes.back().unwrap().slot() + 2; + let vote_slot = vote_state.votes().back().unwrap().slot() + 2; let vote_slot_hash = slot_hashes .iter() .find(|(slot, _hash)| *slot == vote_slot) @@ -3401,7 +3428,7 @@ mod tests { // Have to vote for a slot greater than the last vote in the vote state to avoid VoteTooOld // errors - let vote_slot = vote_state.votes.back().unwrap().slot() + 2; + let vote_slot = vote_state.votes().back().unwrap().slot() + 2; let vote_slot_hash = slot_hashes .iter() .find(|(slot, _hash)| *slot == vote_slot) @@ -3446,7 +3473,7 @@ mod tests { // Have to vote for a slot greater than the last vote in the vote state to avoid VoteTooOld // errors - let vote_slot = vote_state.votes.back().unwrap().slot() + 2; + let vote_slot = vote_state.votes().back().unwrap().slot() + 2; let vote_slot_hash = slot_hashes .iter() .find(|(slot, _hash)| *slot == vote_slot) @@ -3476,7 +3503,7 @@ mod tests { ] ); - // Because 6 from the original VoteState + // Because 6 from the original VoteStateV3 // should not have been popped off in the proposed state, // we should get a 
lockout conflict assert_eq!( @@ -3494,7 +3521,7 @@ mod tests { // Have to vote for a slot greater than the last vote in the vote state to avoid VoteTooOld // errors - let vote_slot = vote_state.votes.back().unwrap().slot() + 2; + let vote_slot = vote_state.votes().back().unwrap().slot() + 2; let vote_slot_hash = Hash::new_unique(); let mut tower_sync = TowerSync::from(vec![(2, 4), (4, 3), (6, 2), (vote_slot, 1)]); tower_sync.hash = vote_slot_hash; @@ -3550,4 +3577,40 @@ mod tests { expected_allowed ); } + + #[test] + fn test_create_v4_account_with_authorized() { + let node_pubkey = Pubkey::new_unique(); + let authorized_voter = Pubkey::new_unique(); + let authorized_withdrawer = Pubkey::new_unique(); + let bls_pubkey_compressed = [42; 48]; + let inflation_rewards_commission_bps = 10000; + let lamports = 100; + let vote_account = create_v4_account_with_authorized( + &node_pubkey, + &authorized_voter, + &authorized_withdrawer, + Some(bls_pubkey_compressed), + inflation_rewards_commission_bps, + lamports, + ); + assert_eq!(vote_account.lamports(), lamports); + assert_eq!(vote_account.owner(), &id()); + assert_eq!(vote_account.data().len(), VoteStateV4::size_of()); + let vote_state_v4 = VoteStateV4::deserialize(vote_account.data(), &node_pubkey).unwrap(); + assert_eq!(vote_state_v4.node_pubkey, node_pubkey); + assert_eq!( + vote_state_v4.authorized_voters, + AuthorizedVoters::new(0, authorized_voter) + ); + assert_eq!(vote_state_v4.authorized_withdrawer, authorized_withdrawer); + assert_eq!( + vote_state_v4.bls_pubkey_compressed, + Some(bls_pubkey_compressed) + ); + assert_eq!( + vote_state_v4.inflation_rewards_commission_bps, + inflation_rewards_commission_bps + ); + } } diff --git a/programs/zk-elgamal-proof/Cargo.toml b/programs/zk-elgamal-proof/Cargo.toml index 0e6ee2a63d7c3e..73004c715fa7d4 100644 --- a/programs/zk-elgamal-proof/Cargo.toml +++ b/programs/zk-elgamal-proof/Cargo.toml @@ -14,9 +14,9 @@ bytemuck = { workspace = true } num-derive = { workspace = true } 
num-traits = { workspace = true } solana-instruction = { workspace = true, features = ["std"] } -solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk-ids = { workspace = true } +solana-svm-log-collector = { workspace = true } solana-zk-sdk = { workspace = true } [dev-dependencies] diff --git a/programs/zk-elgamal-proof/src/lib.rs b/programs/zk-elgamal-proof/src/lib.rs index c6fb759ccc17b7..4ca5ac291b6771 100644 --- a/programs/zk-elgamal-proof/src/lib.rs +++ b/programs/zk-elgamal-proof/src/lib.rs @@ -3,9 +3,9 @@ use { bytemuck::Pod, solana_instruction::error::InstructionError, - solana_log_collector::ic_msg, solana_program_runtime::{declare_process_instruction, invoke_context::InvokeContext}, solana_sdk_ids::system_program, + solana_svm_log_collector::ic_msg, solana_zk_sdk::zk_elgamal_proof_program::{ id, instruction::ProofInstruction, @@ -45,8 +45,8 @@ where // if instruction data is exactly 5 bytes, then read proof from an account let context_data = if instruction_data.len() == INSTRUCTION_DATA_LENGTH_WITH_PROOF_ACCOUNT { - let proof_data_account = instruction_context - .try_borrow_instruction_account(transaction_context, accessed_accounts)?; + let proof_data_account = + instruction_context.try_borrow_instruction_account(accessed_accounts)?; accessed_accounts = accessed_accounts.checked_add(1).unwrap(); let proof_data_offset = u32::from_le_bytes( @@ -93,14 +93,11 @@ where // create context state if additional accounts are provided with the instruction if instruction_context.get_number_of_instruction_accounts() > accessed_accounts { let context_state_authority = *instruction_context - .try_borrow_instruction_account( - transaction_context, - accessed_accounts.checked_add(1).unwrap(), - )? + .try_borrow_instruction_account(accessed_accounts.checked_add(1).unwrap())? 
.get_key(); - let mut proof_context_account = instruction_context - .try_borrow_instruction_account(transaction_context, accessed_accounts)?; + let mut proof_context_account = + instruction_context.try_borrow_instruction_account(accessed_accounts)?; if *proof_context_account.get_owner() != id() { return Err(InstructionError::InvalidAccountOwner); @@ -131,27 +128,20 @@ fn process_close_proof_context(invoke_context: &mut InvokeContext) -> Result<(), let instruction_context = transaction_context.get_current_instruction_context()?; let owner_pubkey = { - let owner_account = - instruction_context.try_borrow_instruction_account(transaction_context, 2)?; - - if !owner_account.is_signer() { + if !instruction_context.is_instruction_account_signer(2)? { return Err(InstructionError::MissingRequiredSignature); } - *owner_account.get_key() - }; // done with `owner_account`, so drop it to prevent a potential double borrow - let proof_context_account_pubkey = *instruction_context - .try_borrow_instruction_account(transaction_context, 0)? - .get_key(); - let destination_account_pubkey = *instruction_context - .try_borrow_instruction_account(transaction_context, 1)? - .get_key(); + *instruction_context.get_key_of_instruction_account(2)? 
+ }; + + let proof_context_account_pubkey = *instruction_context.get_key_of_instruction_account(0)?; + let destination_account_pubkey = *instruction_context.get_key_of_instruction_account(1)?; if proof_context_account_pubkey == destination_account_pubkey { return Err(InstructionError::InvalidInstructionData); } - let mut proof_context_account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut proof_context_account = instruction_context.try_borrow_instruction_account(0)?; let proof_context_state_meta = ProofContextStateMeta::try_from_bytes(proof_context_account.get_data())?; let expected_owner_pubkey = proof_context_state_meta.context_state_authority; @@ -160,8 +150,7 @@ fn process_close_proof_context(invoke_context: &mut InvokeContext) -> Result<(), return Err(InstructionError::InvalidAccountOwner); } - let mut destination_account = - instruction_context.try_borrow_instruction_account(transaction_context, 1)?; + let mut destination_account = instruction_context.try_borrow_instruction_account(1)?; destination_account.checked_add_lamports(proof_context_account.get_lamports())?; proof_context_account.set_lamports(0)?; proof_context_account.set_data_length(0)?; diff --git a/programs/zk-token-proof/Cargo.toml b/programs/zk-token-proof/Cargo.toml index eb916c82340b8c..e9f5f6fb8d928d 100644 --- a/programs/zk-token-proof/Cargo.toml +++ b/programs/zk-token-proof/Cargo.toml @@ -14,9 +14,9 @@ bytemuck = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } solana-instruction = { workspace = true } -solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk-ids = { workspace = true } +solana-svm-log-collector = { workspace = true } solana-zk-token-sdk = { workspace = true } [dev-dependencies] diff --git a/programs/zk-token-proof/benches/verify_proofs.rs b/programs/zk-token-proof/benches/verify_proofs.rs index 36a88475f4d5f3..a059a68ba1c98a 100644 --- 
a/programs/zk-token-proof/benches/verify_proofs.rs +++ b/programs/zk-token-proof/benches/verify_proofs.rs @@ -1,4 +1,7 @@ #![allow(clippy::arithmetic_side_effects)] +// Allow deprecated warnings since this crate will be removed along with +// `solana-zk-token-sdk` +#![allow(deprecated)] use { criterion::{criterion_group, criterion_main, Criterion}, curve25519_dalek::scalar::Scalar, diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index dbbe3d72bc7886..b5969e9314d948 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -1,11 +1,14 @@ #![forbid(unsafe_code)] +// Allow deprecated warnings since this crate will be removed along with +// `solana-zk-token-sdk` +#![allow(deprecated)] use { bytemuck::Pod, solana_instruction::{error::InstructionError, TRANSACTION_LEVEL_STACK_HEIGHT}, - solana_log_collector::ic_msg, solana_program_runtime::{declare_process_instruction, invoke_context::InvokeContext}, solana_sdk_ids::system_program, + solana_svm_log_collector::ic_msg, solana_zk_token_sdk::{ zk_token_proof_instruction::*, zk_token_proof_program::id, @@ -55,8 +58,8 @@ where return Err(InstructionError::InvalidInstructionData); } - let proof_data_account = instruction_context - .try_borrow_instruction_account(transaction_context, accessed_accounts)?; + let proof_data_account = + instruction_context.try_borrow_instruction_account(accessed_accounts)?; accessed_accounts = accessed_accounts.checked_add(1).unwrap(); let proof_data_offset = u32::from_le_bytes( @@ -103,14 +106,9 @@ where // create context state if additional accounts are provided with the instruction if instruction_context.get_number_of_instruction_accounts() > accessed_accounts { let context_state_authority = *instruction_context - .try_borrow_instruction_account( - transaction_context, - accessed_accounts.checked_add(1).unwrap(), - )? 
- .get_key(); - - let mut proof_context_account = instruction_context - .try_borrow_instruction_account(transaction_context, accessed_accounts)?; + .get_key_of_instruction_account(accessed_accounts.checked_add(1).unwrap())?; + let mut proof_context_account = + instruction_context.try_borrow_instruction_account(accessed_accounts)?; if *proof_context_account.get_owner() != id() { return Err(InstructionError::InvalidAccountOwner); @@ -141,27 +139,20 @@ fn process_close_proof_context(invoke_context: &mut InvokeContext) -> Result<(), let instruction_context = transaction_context.get_current_instruction_context()?; let owner_pubkey = { - let owner_account = - instruction_context.try_borrow_instruction_account(transaction_context, 2)?; - - if !owner_account.is_signer() { + if !instruction_context.is_instruction_account_signer(2)? { return Err(InstructionError::MissingRequiredSignature); } - *owner_account.get_key() - }; // done with `owner_account`, so drop it to prevent a potential double borrow - - let proof_context_account_pubkey = *instruction_context - .try_borrow_instruction_account(transaction_context, 0)? - .get_key(); - let destination_account_pubkey = *instruction_context - .try_borrow_instruction_account(transaction_context, 1)? - .get_key(); + + *instruction_context.get_program_key()? 
+ }; + + let proof_context_account_pubkey = *instruction_context.get_key_of_instruction_account(0)?; + let destination_account_pubkey = *instruction_context.get_key_of_instruction_account(1)?; if proof_context_account_pubkey == destination_account_pubkey { return Err(InstructionError::InvalidInstructionData); } - let mut proof_context_account = - instruction_context.try_borrow_instruction_account(transaction_context, 0)?; + let mut proof_context_account = instruction_context.try_borrow_instruction_account(0)?; let proof_context_state_meta = ProofContextStateMeta::try_from_bytes(proof_context_account.get_data())?; let expected_owner_pubkey = proof_context_state_meta.context_state_authority; @@ -170,8 +161,7 @@ fn process_close_proof_context(invoke_context: &mut InvokeContext) -> Result<(), return Err(InstructionError::InvalidAccountOwner); } - let mut destination_account = - instruction_context.try_borrow_instruction_account(transaction_context, 1)?; + let mut destination_account = instruction_context.try_borrow_instruction_account(1)?; destination_account.checked_add_lamports(proof_context_account.get_lamports())?; proof_context_account.set_lamports(0)?; proof_context_account.set_data_length(0)?; diff --git a/quic-client/Cargo.toml b/quic-client/Cargo.toml index 92b537ebd1713a..1427c56a35d235 100644 --- a/quic-client/Cargo.toml +++ b/quic-client/Cargo.toml @@ -40,3 +40,4 @@ solana-net-utils = { workspace = true, features = ["dev-context-only-utils"] } solana-packet = { workspace = true } solana-perf = { workspace = true } solana-streamer = { workspace = true, features = ["dev-context-only-utils"] } +tokio-util = { workspace = true } diff --git a/quic-client/src/nonblocking/quic_client.rs b/quic-client/src/nonblocking/quic_client.rs index a4ffa8bc93f088..05697ca398899b 100644 --- a/quic-client/src/nonblocking/quic_client.rs +++ b/quic-client/src/nonblocking/quic_client.rs @@ -17,10 +17,7 @@ use { }, solana_keypair::Keypair, solana_measure::measure::Measure, - 
solana_net_utils::{ - sockets::{bind_in_range_with_config, SocketConfiguration as SocketConfig}, - VALIDATOR_PORT_RANGE, - }, + solana_net_utils::sockets, solana_quic_definitions::{ QUIC_CONNECTION_HANDSHAKE_TIMEOUT, QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT, QUIC_SEND_FAIRNESS, }, @@ -32,7 +29,7 @@ use { }, solana_transaction_error::TransportResult, std::{ - net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, + net::{SocketAddr, UdpSocket}, sync::{atomic::Ordering, Arc}, thread, }, @@ -81,11 +78,12 @@ impl QuicLazyInitializedEndpoint { let mut endpoint = if let Some(endpoint) = &self.client_endpoint { endpoint.clone() } else { - let config = SocketConfig::default(); - let client_socket = bind_in_range_with_config( - IpAddr::V4(Ipv4Addr::UNSPECIFIED), - VALIDATOR_PORT_RANGE, - config, + // This will bind to random ports, but VALIDATOR_PORT_RANGE is outside + // of the range for CI tests when this is running in CI + let client_socket = sockets::bind_in_range_with_config( + std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), + solana_net_utils::VALIDATOR_PORT_RANGE, + sockets::SocketConfiguration::default(), ) .expect("QuicLazyInitializedEndpoint::create_endpoint bind_in_range") .1; @@ -296,7 +294,8 @@ impl QuicClient { match conn { Ok(conn) => { info!( - "Made 0rtt connection to {} with id {} try_count {}, last_connection_id: {}, last_error: {:?}", + "Made 0rtt connection to {} with id {} try_count {}, \ + last_connection_id: {}, last_error: {:?}", self.addr, conn.stable_id(), connection_try_count, @@ -330,7 +329,8 @@ impl QuicClient { Ok(conn) => { *conn_guard = Some(conn.clone()); info!( - "Made connection to {} id {} try_count {}, from connection cache warming?: {}", + "Made connection to {} id {} try_count {}, from connection \ + cache warming?: {}", self.addr, conn.connection.stable_id(), connection_try_count, @@ -340,8 +340,13 @@ impl QuicClient { conn.connection.clone() } Err(err) => { - info!("Cannot make connection to {}, error {:}, from connection cache warming?: 
{}", - self.addr, err, data.is_empty()); + info!( + "Cannot make connection to {}, error {:}, from connection \ + cache warming?: {}", + self.addr, + err, + data.is_empty() + ); return Err(err); } } @@ -396,7 +401,8 @@ impl QuicClient { .prepare_connection_us .fetch_add(measure_prepare_connection.as_us(), Ordering::Relaxed); trace!( - "Succcessfully sent to {} with id {}, thread: {:?}, data len: {}, send_packet_us: {} prepare_connection_us: {}", + "Succcessfully sent to {} with id {}, thread: {:?}, data len: {}, \ + send_packet_us: {} prepare_connection_us: {}", self.addr, connection.stable_id(), thread::current().id(), diff --git a/quic-client/src/quic_client.rs b/quic-client/src/quic_client.rs index 4b1b10462dbcd5..ae7837a914702a 100644 --- a/quic-client/src/quic_client.rs +++ b/quic-client/src/quic_client.rs @@ -80,7 +80,7 @@ pub fn get_runtime() -> &'static Runtime { async fn send_data_async( connection: Arc, - buffer: Vec, + buffer: Arc>, ) -> TransportResult<()> { let result = timeout(SEND_DATA_TIMEOUT, connection.send_data(&buffer)).await; ASYNC_TASK_SEMAPHORE.release(); @@ -160,7 +160,7 @@ impl ClientConnection for QuicClientConnection { Ok(()) } - fn send_data_async(&self, data: Vec) -> TransportResult<()> { + fn send_data_async(&self, data: Arc>) -> TransportResult<()> { let _lock = ASYNC_TASK_SEMAPHORE.acquire(); let inner = self.inner.clone(); diff --git a/quic-client/tests/quic_client.rs b/quic-client/tests/quic_client.rs index 23234fd7631a6d..e086bc0ad58fe6 100644 --- a/quic-client/tests/quic_client.rs +++ b/quic-client/tests/quic_client.rs @@ -18,13 +18,11 @@ mod tests { solana_tls_utils::{new_dummy_x509_certificate, QuicClientCertificate}, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, RwLock, - }, + sync::{Arc, RwLock}, time::{Duration, Instant}, }, tokio::time::sleep, + tokio_util::sync::CancellationToken, }; fn check_packets( @@ -52,11 +50,11 @@ mod tests { assert!(total_packets > 0); } - 
fn server_args() -> (UdpSocket, Arc, Keypair) { + fn server_args() -> (UdpSocket, CancellationToken, Keypair) { let port_range = localhost_port_range_for_tests(); ( bind_to(IpAddr::V4(Ipv4Addr::LOCALHOST), port_range.0).expect("should bind"), - Arc::new(AtomicBool::new(false)), + CancellationToken::new(), Keypair::new(), ) } @@ -70,20 +68,20 @@ mod tests { solana_logger::setup(); let (sender, receiver) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (s, exit, keypair) = server_args(); + let (s, cancel, keypair) = server_args(); let SpawnServerResult { endpoints: _, thread: t, key_updater: _, - } = solana_streamer::quic::spawn_server( + } = solana_streamer::quic::spawn_server_with_cancel( "solQuicTest", "quic_streamer_test", - s.try_clone().unwrap(), + vec![s.try_clone().unwrap()], &keypair, sender, - exit.clone(), staked_nodes, QuicServerParams::default_for_tests(), + cancel.clone(), ) .unwrap(); @@ -105,7 +103,7 @@ mod tests { assert!(client.send_data_batch_async(packets).is_ok()); check_packets(receiver, num_bytes, num_expected_packets); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.join().unwrap(); } @@ -150,20 +148,20 @@ mod tests { solana_logger::setup(); let (sender, receiver) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (s, exit, keypair) = server_args(); + let (s, cancel, keypair) = server_args(); let solana_streamer::nonblocking::quic::SpawnNonBlockingServerResult { endpoints: _, stats: _, thread: t, max_concurrent_connections: _, - } = solana_streamer::nonblocking::quic::spawn_server( + } = solana_streamer::nonblocking::quic::spawn_server_with_cancel( "quic_streamer_test", - s.try_clone().unwrap(), + vec![s.try_clone().unwrap()], &keypair, sender, - exit.clone(), staked_nodes, QuicServerParams::default_for_tests(), + cancel.clone(), ) .unwrap(); @@ -186,7 +184,7 @@ mod tests { } nonblocking_check_packets(receiver, num_bytes, num_expected_packets).await; - 
exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.await.unwrap(); } @@ -208,26 +206,26 @@ mod tests { // Request Receiver let (sender, receiver) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (request_recv_socket, request_recv_exit, keypair) = server_args(); + let (request_recv_socket, request_recv_cancel, keypair) = server_args(); let SpawnServerResult { endpoints: request_recv_endpoints, thread: request_recv_thread, key_updater: _, - } = solana_streamer::quic::spawn_server( + } = solana_streamer::quic::spawn_server_with_cancel( "solQuicTest", "quic_streamer_test", - request_recv_socket.try_clone().unwrap(), + [request_recv_socket.try_clone().unwrap()], &keypair, sender, - request_recv_exit.clone(), staked_nodes.clone(), QuicServerParams::default_for_tests(), + request_recv_cancel.clone(), ) .unwrap(); drop(request_recv_endpoints); // Response Receiver: - let (response_recv_socket, response_recv_exit, keypair2) = server_args(); + let (response_recv_socket, response_recv_cancel, keypair2) = server_args(); let (sender2, receiver2) = unbounded(); let addr = response_recv_socket.local_addr().unwrap().ip(); @@ -237,15 +235,15 @@ mod tests { endpoints: mut response_recv_endpoints, thread: response_recv_thread, key_updater: _, - } = solana_streamer::quic::spawn_server( + } = solana_streamer::quic::spawn_server_with_cancel( "solQuicTest", "quic_streamer_test", - response_recv_socket, + [response_recv_socket], &keypair2, sender2, - response_recv_exit.clone(), staked_nodes, QuicServerParams::default_for_tests(), + response_recv_cancel.clone(), ) .unwrap(); @@ -304,11 +302,11 @@ mod tests { drop(request_sender); drop(response_sender); - request_recv_exit.store(true, Ordering::Relaxed); + request_recv_cancel.cancel(); request_recv_thread.join().unwrap(); info!("Request receiver exited!"); - response_recv_exit.store(true, Ordering::Relaxed); + response_recv_cancel.cancel(); response_recv_thread.join().unwrap(); info!("Response 
receiver exited!"); } @@ -318,20 +316,20 @@ mod tests { solana_logger::setup(); let (sender, receiver) = unbounded(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); - let (s, exit, keypair) = server_args(); + let (s, cancel, keypair) = server_args(); let solana_streamer::nonblocking::quic::SpawnNonBlockingServerResult { endpoints: _, stats: _, thread: t, max_concurrent_connections: _, - } = solana_streamer::nonblocking::quic::spawn_server( + } = solana_streamer::nonblocking::quic::spawn_server_with_cancel( "quic_streamer_test", - s.try_clone().unwrap(), + vec![s.try_clone().unwrap()], &keypair, sender, - exit.clone(), staked_nodes, QuicServerParams::default_for_tests(), + cancel.clone(), ) .unwrap(); @@ -353,7 +351,7 @@ mod tests { } nonblocking_check_packets(receiver, num_bytes, num_expected_packets).await; - exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.await.unwrap(); // We close the connection after the server is down, this should not block diff --git a/remote-wallet/Cargo.toml b/remote-wallet/Cargo.toml index 01698749762f57..f77f01f659736b 100644 --- a/remote-wallet/Cargo.toml +++ b/remote-wallet/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "solana-remote-wallet" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-remote-wallet" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } diff --git a/remote-wallet/src/ledger.rs b/remote-wallet/src/ledger.rs index e6c38cfdf69c45..e6cf3b1bc0e912 100644 --- a/remote-wallet/src/ledger.rs +++ b/remote-wallet/src/ledger.rs @@ -267,7 +267,7 @@ impl LedgerWallet { } let status = ((message[message.len() - 2] as usize) << 8) | (message[message.len() - 1] as usize); - trace!("Read status {:x}", status); + trace!("Read status {status:x}"); Self::parse_status(status)?; let new_len = message.len() - 2; 
message.truncate(new_len); diff --git a/remote-wallet/src/remote_wallet.rs b/remote-wallet/src/remote_wallet.rs index 3ca8c52406dafb..489e5fa3782e86 100644 --- a/remote-wallet/src/remote_wallet.rs +++ b/remote-wallet/src/remote_wallet.rs @@ -127,7 +127,7 @@ impl RemoteWalletManager { Ok(info) => { ledger.pretty_path = info.get_pretty_path(); let path = device_info.path().to_str().unwrap().to_string(); - trace!("Found device: {:?}", info); + trace!("Found device: {info:?}"); detected_devices.push(Device { path, info, @@ -135,12 +135,12 @@ impl RemoteWalletManager { }) } Err(err) => { - error!("Error connecting to ledger device to read info: {}", err); + error!("Error connecting to ledger device to read info: {err}"); errors.push(err) } } } - Err(err) => error!("Error connecting to ledger device to read info: {}", err), + Err(err) => error!("Error connecting to ledger device to read info: {err}"), } } @@ -198,7 +198,7 @@ impl RemoteWalletManager { while start_time.elapsed() <= *max_polling_duration { if let Ok(num_devices) = self.update_devices() { let plural = if num_devices == 1 { "" } else { "s" }; - trace!("{} Remote Wallet{} found", num_devices, plural); + trace!("{num_devices} Remote Wallet{plural} found"); return true; } } diff --git a/rpc-client-api/src/client_error.rs b/rpc-client-api/src/client_error.rs index 00c228471ea184..e69a06a5db050a 100644 --- a/rpc-client-api/src/client_error.rs +++ b/rpc-client-api/src/client_error.rs @@ -1,11 +1,13 @@ -pub use reqwest; use { crate::{request, response}, - solana_signer::SignerError, - solana_transaction_error::{TransactionError, TransportError}, + solana_transaction_error::TransportError, std::io, thiserror::Error as ThisError, }; +pub use { + anyhow::Error as AnyhowError, reqwest, serde_json::error::Error as SerdeJsonError, + solana_signer::SignerError, solana_transaction_error::TransactionError, +}; #[derive(ThisError, Debug)] #[allow(clippy::large_enum_variant)] @@ -15,11 +17,11 @@ pub enum ErrorKind { 
#[error(transparent)] Reqwest(#[from] reqwest::Error), #[error("Middleware: {0}")] - Middleware(anyhow::Error), + Middleware(AnyhowError), #[error(transparent)] RpcError(#[from] request::RpcError), #[error(transparent)] - SerdeJson(#[from] serde_json::error::Error), + SerdeJson(#[from] SerdeJsonError), #[error(transparent)] SigningError(#[from] SignerError), #[error(transparent)] diff --git a/rpc-client-api/src/custom_error.rs b/rpc-client-api/src/custom_error.rs index cd1449ae6c8664..de1b9c2fb1efa1 100644 --- a/rpc-client-api/src/custom_error.rs +++ b/rpc-client-api/src/custom_error.rs @@ -29,6 +29,7 @@ pub const JSON_RPC_SERVER_ERROR_SLOT_NOT_EPOCH_BOUNDARY: i64 = -32018; pub const JSON_RPC_SERVER_ERROR_LONG_TERM_STORAGE_UNREACHABLE: i64 = -32019; #[derive(Error, Debug)] +#[allow(clippy::large_enum_variant)] pub enum RpcCustomError { #[error("BlockCleanedUp")] BlockCleanedUp { diff --git a/rpc-client-types/Cargo.toml b/rpc-client-types/Cargo.toml index 8fb008655cd346..6121e822d56a87 100644 --- a/rpc-client-types/Cargo.toml +++ b/rpc-client-types/Cargo.toml @@ -21,11 +21,11 @@ serde_derive = { workspace = true } serde_json = { workspace = true } solana-account = { workspace = true } solana-account-decoder-client-types = { workspace = true } +solana-address = { workspace = true } solana-clock = { workspace = true } solana-commitment-config = { workspace = true, features = ["serde"] } solana-fee-calculator = { workspace = true, features = ["serde"] } solana-inflation = { workspace = true } -solana-pubkey = { workspace = true } solana-transaction-error = { workspace = true } solana-transaction-status-client-types = { workspace = true } solana-version = { workspace = true } diff --git a/rpc-client-types/src/config.rs b/rpc-client-types/src/config.rs index 020cd41de01567..f5ef8204f4a811 100644 --- a/rpc-client-types/src/config.rs +++ b/rpc-client-types/src/config.rs @@ -1,7 +1,9 @@ use { crate::filter::RpcFilterType, - 
solana_account_decoder_client_types::{UiAccountEncoding, UiDataSliceConfig}, solana_clock::{Epoch, Slot}, +}; +pub use { + solana_account_decoder_client_types::{UiAccountEncoding, UiDataSliceConfig}, solana_commitment_config::{CommitmentConfig, CommitmentLevel}, solana_transaction_status_client_types::{TransactionDetails, UiTransactionEncoding}, }; diff --git a/rpc-client-types/src/filter.rs b/rpc-client-types/src/filter.rs index a3dbdfb72cdf1d..7c3d616cefefb8 100644 --- a/rpc-client-types/src/filter.rs +++ b/rpc-client-types/src/filter.rs @@ -1,8 +1,6 @@ use { base64::{prelude::BASE64_STANDARD, Engine}, serde::Deserialize, - solana_account::{AccountSharedData, ReadableAccount}, - spl_generic_token::{token::GenericTokenAccount, token_2022::Account}, std::borrow::Cow, thiserror::Error, }; @@ -59,18 +57,6 @@ impl RpcFilterType { RpcFilterType::TokenAccountState => Ok(()), } } - - #[deprecated( - since = "2.0.0", - note = "Use solana_rpc::filter::filter_allows instead" - )] - pub fn allows(&self, account: &AccountSharedData) -> bool { - match self { - RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), - RpcFilterType::TokenAccountState => Account::valid_account_data(account.data()), - } - } } #[derive(Error, PartialEq, Eq, Debug)] diff --git a/rpc-client-types/src/request.rs b/rpc-client-types/src/request.rs index e66797bff62bc1..14b1287e77ccb7 100644 --- a/rpc-client-types/src/request.rs +++ b/rpc-client-types/src/request.rs @@ -1,8 +1,8 @@ +pub use solana_address::Address; use { crate::response::RpcSimulateTransactionResult, serde_json::{json, Value}, solana_clock::Slot, - solana_pubkey::Pubkey, std::fmt, thiserror::Error, }; @@ -169,6 +169,7 @@ impl RpcRequest { } #[derive(Debug)] +#[allow(clippy::large_enum_variant)] pub enum RpcResponseErrorData { Empty, SendTransactionPreflightFailure(RpcSimulateTransactionResult), @@ -219,8 +220,8 @@ pub enum RpcError { 
#[derive(Serialize, Deserialize)] pub enum TokenAccountsFilter { - Mint(Pubkey), - ProgramId(Pubkey), + Mint(Address), + ProgramId(Address), } #[cfg(test)] diff --git a/rpc-client-types/src/response.rs b/rpc-client-types/src/response.rs index ff96616e127b58..1541e266cde276 100644 --- a/rpc-client-types/src/response.rs +++ b/rpc-client-types/src/response.rs @@ -1,17 +1,20 @@ use { serde::{Deserialize, Deserializer, Serialize, Serializer}, - solana_account_decoder_client_types::{token::UiTokenAmount, UiAccount}, solana_clock::{Epoch, Slot, UnixTimestamp}, - solana_fee_calculator::{FeeCalculator, FeeRateGovernor}, solana_inflation::Inflation, - solana_transaction_error::TransactionResult as Result, - solana_transaction_status_client_types::{ - ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock, - UiInnerInstructions, UiTransactionError, UiTransactionReturnData, - }, + solana_transaction_status_client_types::ConfirmedTransactionStatusWithSignature, std::{collections::HashMap, fmt, net::SocketAddr, str::FromStr}, thiserror::Error, }; +pub use { + solana_account_decoder_client_types::{token::UiTokenAmount, UiAccount}, + solana_fee_calculator::{FeeCalculator, FeeRateGovernor}, + solana_transaction_error::TransactionResult, + solana_transaction_status_client_types::{ + TransactionConfirmationStatus, UiConfirmedBlock, UiInnerInstructions, UiLoadedAddresses, + UiTransactionError, UiTransactionReturnData, UiTransactionTokenBalance, + }, +}; /// Wrapper for rpc return types of methods that provide responses both with and without context. 
/// Main purpose of this is to fix methods that lack context information in their return type, @@ -391,10 +394,10 @@ pub struct RpcVoteAccountInfo { #[serde(rename_all = "camelCase")] pub struct RpcSignatureConfirmation { pub confirmations: usize, - pub status: Result<()>, + pub status: TransactionResult<()>, } -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] #[serde(rename_all = "camelCase")] pub struct RpcSimulateTransactionResult { pub err: Option, @@ -405,6 +408,12 @@ pub struct RpcSimulateTransactionResult { pub return_data: Option, pub inner_instructions: Option>, pub replacement_blockhash: Option, + pub fee: Option, + pub pre_balances: Option>, + pub post_balances: Option>, + pub pre_token_balances: Option>, + pub post_token_balances: Option>, + pub loaded_addresses: Option, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] diff --git a/rpc-client/Cargo.toml b/rpc-client/Cargo.toml index 9db6b57706e833..7cb61012149c07 100644 --- a/rpc-client/Cargo.toml +++ b/rpc-client/Cargo.toml @@ -64,3 +64,4 @@ solana-pubkey = { workspace = true, features = ["rand"] } solana-signer = { workspace = true } solana-system-transaction = { workspace = true } static_assertions = { workspace = true } +test-case = { workspace = true } diff --git a/rpc-client/src/lib.rs b/rpc-client/src/lib.rs index fddf3e0aab2d8c..c178ee55ea7d1d 100644 --- a/rpc-client/src/lib.rs +++ b/rpc-client/src/lib.rs @@ -6,6 +6,7 @@ pub mod nonblocking; pub mod rpc_client; pub mod rpc_sender; pub mod spinner; +pub use solana_rpc_client_api as api; pub mod mock_sender_for_cli { /// Magic `SIGNATURE` value used by `solana-cli` unit tests. 
diff --git a/rpc-client/src/mock_sender.rs b/rpc-client/src/mock_sender.rs index d433543780933b..76a9e3ed9e67b5 100644 --- a/rpc-client/src/mock_sender.rs +++ b/rpc-client/src/mock_sender.rs @@ -355,8 +355,14 @@ impl RpcSender for MockSender { loaded_accounts_data_size: None, return_data: None, inner_instructions: None, - replacement_blockhash: None - }, + replacement_blockhash: None, + fee: None, + pre_balances: None, + post_balances: None, + pre_token_balances: None, + post_token_balances: None, + loaded_addresses: None, + } })?, "getMinimumBalanceForRentExemption" => json![20], "getVersion" => { diff --git a/rpc-client/src/nonblocking/rpc_client.rs b/rpc-client/src/nonblocking/rpc_client.rs index e4e1396abcfb19..293228b07b7183 100644 --- a/rpc-client/src/nonblocking/rpc_client.rs +++ b/rpc-client/src/nonblocking/rpc_client.rs @@ -4671,7 +4671,8 @@ impl RpcClient { &self, message: &impl SerializableMessage, ) -> ClientResult { - let serialized_encoded = serialize_and_encode(message, UiTransactionEncoding::Base64)?; + let serialized = message.serialize(); + let serialized_encoded = BASE64_STANDARD.encode(serialized); let result = self .send::>>( RpcRequest::GetFeeForMessage, diff --git a/rpc-client/src/rpc_client.rs b/rpc-client/src/rpc_client.rs index bf42e2fb06ad6c..745c68c16f8bee 100644 --- a/rpc-client/src/rpc_client.rs +++ b/rpc-client/src/rpc_client.rs @@ -62,9 +62,19 @@ impl RpcClientConfig { /// Trait used to add support for versioned messages to RPC APIs while /// retaining backwards compatibility -pub trait SerializableMessage: Serialize {} -impl SerializableMessage for LegacyMessage {} -impl SerializableMessage for v0::Message {} +pub trait SerializableMessage { + fn serialize(&self) -> Vec; +} +impl SerializableMessage for LegacyMessage { + fn serialize(&self) -> Vec { + self.serialize() + } +} +impl SerializableMessage for v0::Message { + fn serialize(&self) -> Vec { + self.serialize() + } +} /// Trait used to add support for versioned transactions to 
RPC APIs while /// retaining backwards compatibility @@ -3797,19 +3807,23 @@ mod tests { super::*, crate::mock_sender::PUBKEY, assert_matches::assert_matches, + base64::{prelude::BASE64_STANDARD, Engine}, crossbeam_channel::unbounded, jsonrpc_core::{futures::prelude::*, Error, IoHandler, Params}, jsonrpc_http_server::{AccessControlAllowOrigin, DomainsValidation, ServerBuilder}, serde_json::{json, Number}, solana_account_decoder::encode_ui_account, solana_account_decoder_client_types::UiAccountEncoding, + solana_hash::Hash, solana_instruction::error::InstructionError, solana_keypair::Keypair, + solana_message::{compiled_instruction::CompiledInstruction, MessageHeader}, solana_rpc_client_api::client_error::ErrorKind, solana_signer::Signer, solana_system_transaction as system_transaction, solana_transaction_error::TransactionError, std::{io, thread}, + test_case::test_case, }; #[test] @@ -4254,4 +4268,85 @@ mod tests { assert_eq!(expected_result, result1); } } + + #[test_case(LegacyMessage { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 1, + }, + account_keys: vec![Pubkey::new_unique()], + recent_blockhash: Hash::new_unique(), + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![], + }], + }; "legacy message")] + #[test_case(v0::Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + account_keys: vec![Pubkey::new_unique()], + recent_blockhash: Hash::new_unique(), + instructions: vec![CompiledInstruction { + program_id_index: 0, + accounts: vec![], + data: vec![], + }], + address_table_lookups: vec![], + }; "v0 message")] + fn test_get_fee_for_message_sends_properly_serialized_v0_transaction(message: M) + where + M: SerializableMessage, + { + let serialized_message = message.serialize(); + let serialized_message_base64 = 
BASE64_STANDARD.encode(serialized_message); + + let (sender, receiver) = unbounded(); + thread::spawn(move || { + let rpc_addr = "0.0.0.0:0".parse().unwrap(); + let mut io = IoHandler::default(); + // Successful request + io.add_method("getFeeForMessage", move |params: Params| match params { + Params::Array(p) => { + let first_element = p.first().unwrap(); + if let Value::String(actual_serialized_message) = first_element { + assert_eq!(actual_serialized_message, &serialized_message_base64); + return future::ok(json!(Response { + context: RpcResponseContext { + api_version: None, + slot: 1, + }, + value: json!(42), + })); + } + future::err(Error::invalid_params( + "Expected the serialized message to be the first element of the params", + )) + } + _ => { + panic!("Expected an array of params to be forwarded to `getFeeForMessage"); + } + }); + + let server = ServerBuilder::new(io) + .threads(1) + .cors(DomainsValidation::AllowOnly(vec![ + AccessControlAllowOrigin::Any, + ])) + .start_http(&rpc_addr) + .expect("Unable to start RPC server"); + sender.send(*server.address()).unwrap(); + server.wait(); + }); + + let rpc_addr = receiver.recv().unwrap(); + let rpc_client = RpcClient::new_socket(rpc_addr); + + let fee: u64 = rpc_client.get_fee_for_message(&message).unwrap(); + assert_eq!(fee, 42); + } } diff --git a/rpc-test/Cargo.toml b/rpc-test/Cargo.toml index 7d7ebb8e6311d8..c17305fa960d41 100644 --- a/rpc-test/Cargo.toml +++ b/rpc-test/Cargo.toml @@ -36,6 +36,7 @@ solana-transaction-status = { workspace = true } tokio = { workspace = true, features = ["full"] } [dev-dependencies] +solana-client = { workspace = true, features = ["dev-context-only-utils"] } solana-clock = { workspace = true } solana-commitment-config = { workspace = true } solana-connection-cache = { workspace = true } diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index 9cab007a90680b..e96123f2a5498c 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -10,7 +10,7 @@ use { 
solana_commitment_config::CommitmentConfig, solana_hash::Hash, solana_keypair::Keypair, - solana_net_utils::bind_to_unspecified, + solana_net_utils::sockets::bind_to_localhost_unique, solana_pubkey::Pubkey, solana_pubsub_client::nonblocking::pubsub_client::PubsubClient, solana_rent::Rent, @@ -290,7 +290,7 @@ fn test_rpc_subscriptions() { let test_validator = TestValidator::with_no_fees_udp(alice.pubkey(), None, SocketAddrSpace::Unspecified); - let transactions_socket = bind_to_unspecified().unwrap(); + let transactions_socket = bind_to_localhost_unique().unwrap(); transactions_socket.connect(test_validator.tpu()).unwrap(); let rpc_client = RpcClient::new(test_validator.rpc_url()); @@ -503,7 +503,10 @@ fn run_tpu_send_transaction(tpu_use_quic: bool) { CommitmentConfig::processed(), )); let connection_cache = if tpu_use_quic { - ConnectionCache::new_quic("connection_cache_test", DEFAULT_TPU_CONNECTION_POOL_SIZE) + ConnectionCache::new_quic_for_tests( + "connection_cache_test", + DEFAULT_TPU_CONNECTION_POOL_SIZE, + ) } else { ConnectionCache::with_udp("connection_cache_test", DEFAULT_TPU_CONNECTION_POOL_SIZE) }; diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 67c89bbc70cc88..72ed58bedd0ebc 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -46,6 +46,7 @@ soketto = { workspace = true } solana-account = { workspace = true } solana-account-decoder = { workspace = true } solana-accounts-db = { workspace = true } +solana-cli-output = { workspace = true } solana-client = { workspace = true } solana-clock = { workspace = true } solana-commitment-config = { workspace = true } @@ -106,11 +107,11 @@ tokio-util = { workspace = true, features = ["codec", "compat"] } agave-reserved-account-keys = { workspace = true } serial_test = { workspace = true } solana-address-lookup-table-interface = { workspace = true } +solana-cluster-type = { workspace = true } solana-compute-budget-interface = { workspace = true } solana-fee-calculator = { workspace = true } solana-fee-structure = { 
workspace = true } solana-instruction = { workspace = true } -solana-log-collector = { workspace = true } solana-net-utils = { workspace = true } solana-nonce = { workspace = true } solana-nonce-account = { workspace = true } @@ -127,6 +128,7 @@ solana-send-transaction-service = { workspace = true, features = ["dev-context-o solana-sha256-hasher = { workspace = true } solana-stake-interface = { workspace = true } solana-stake-program = { workspace = true } +solana-svm-log-collector = { workspace = true } solana-vote-interface = { workspace = true } spl-pod = { workspace = true } symlink = { workspace = true } diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs index 7b14954c7d81f7..99466b531aa911 100644 --- a/rpc/src/optimistically_confirmed_bank_tracker.rs +++ b/rpc/src/optimistically_confirmed_bank_tracker.rs @@ -72,13 +72,13 @@ impl std::fmt::Debug for BankNotification { } } -pub type BankNotificationWithEventSequence = ( +pub type BankNotificationWithDependencyWork = ( BankNotification, - Option, // dependecy work sequence number + Option, // dependecy work id ); -pub type BankNotificationReceiver = Receiver; -pub type BankNotificationSender = Sender; +pub type BankNotificationReceiver = Receiver; +pub type BankNotificationSender = Sender; #[derive(Clone)] pub struct BankNotificationSenderConfig { @@ -138,7 +138,7 @@ impl OptimisticallyConfirmedBankTracker { #[allow(clippy::too_many_arguments)] fn recv_notification( - receiver: &Receiver, + receiver: &Receiver, bank_forks: &RwLock, optimistically_confirmed_bank: &RwLock, subscriptions: &RpcSubscriptions, @@ -270,7 +270,7 @@ impl OptimisticallyConfirmedBankTracker { #[allow(clippy::too_many_arguments)] pub fn process_notification( - (notification, dependency_work): BankNotificationWithEventSequence, + (notification, dependency_work): BankNotificationWithDependencyWork, bank_forks: &RwLock, optimistically_confirmed_bank: &RwLock, subscriptions: 
&RpcSubscriptions, @@ -469,7 +469,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(2), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -489,7 +489,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(1), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -509,7 +509,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(3), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -534,7 +534,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::Frozen(bank3), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -558,7 +558,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(4), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -591,7 +591,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::NewRootBank(bank5), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -614,7 +614,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::NewRootedChain(parent_roots), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -645,7 +645,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(6), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, 
@@ -670,7 +670,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::NewRootBank(bank7), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -692,7 +692,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::NewRootedChain(parent_roots), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -718,8 +718,8 @@ mod tests { let exit = Arc::new(AtomicBool::new(false)); let dependency_tracker: Arc = Arc::new(dependency_tracker::DependencyTracker::default()); - let work_sequence_1 = 345; - let work_sequence_2 = 678; + let work_id_1 = 345; + let work_id_2 = 678; let tracker_clone = dependency_tracker.clone(); let handle = thread::spawn(move || { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(100); @@ -756,7 +756,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(1), - Some(work_sequence_1), /* dependency work sequence */ + Some(work_id_1), /* dependency work id */ ), &bank_forks, &optimistically_confirmed_bank, @@ -781,7 +781,7 @@ mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::Frozen(bank1), - Some(work_sequence_2), /* dependency work sequence */ + Some(work_id_2), /* dependency work id */ ), &bank_forks, &optimistically_confirmed_bank, @@ -800,8 +800,8 @@ mod tests { assert_eq!(pending_optimistically_confirmed_banks.len(), 0); }); - dependency_tracker.mark_this_and_all_previous_work_processed(work_sequence_1); - dependency_tracker.mark_this_and_all_previous_work_processed(work_sequence_2); + dependency_tracker.mark_this_and_all_previous_work_processed(work_id_1); + dependency_tracker.mark_this_and_all_previous_work_processed(work_id_2); handle.join().unwrap(); } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 
a9f13d366def4b..f884657ee3b76a 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -82,7 +82,7 @@ use { sanitized::{MessageHash, SanitizedTransaction, MAX_TX_ACCOUNT_LOCKS}, versioned::VersionedTransaction, }, - solana_transaction_context::TransactionAccount, + solana_transaction_context::transaction_accounts::TransactionAccount, solana_transaction_error::TransactionError, solana_transaction_status::{ map_inner_instructions, BlockEncodingOptions, ConfirmedBlock, @@ -159,7 +159,7 @@ fn is_finalized( && (blockstore.is_root(slot) || bank.status_cache_ancestors().contains(&slot)) } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct JsonRpcConfig { pub enable_rpc_transaction_history: bool, pub enable_extended_tx_metadata_storage: bool, @@ -211,7 +211,7 @@ impl JsonRpcConfig { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct RpcBigtableConfig { pub enable_bigtable_ledger_upload: bool, pub bigtable_instance_name: String, @@ -494,7 +494,6 @@ impl JsonRpcRequestProcessor { ); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let startup_verification_complete = Arc::clone(bank.get_startup_verification_complete()); let slot = bank.slot(); let optimistically_confirmed_bank = Arc::new(RwLock::new(OptimisticallyConfirmedBank { bank })); @@ -514,7 +513,6 @@ impl JsonRpcRequestProcessor { blockstore, 0, exit, - startup_verification_complete, )), cluster_info, genesis_hash, @@ -3454,7 +3452,7 @@ pub mod rpc_full { use { super::*, solana_message::{SanitizedVersionedMessage, VersionedMessage}, - solana_transaction_status::parse_ui_inner_instructions, + solana_transaction_status::{parse_ui_inner_instructions, UiLoadedAddresses}, }; #[rpc] pub trait Full { @@ -3891,6 +3889,11 @@ pub mod rpc_full { loaded_accounts_data_size, return_data, inner_instructions: _, // Always `None` due to `enable_cpi_recording = false` + fee, + pre_balances: _, + post_balances: _, + pre_token_balances: _, + post_token_balances: 
_, } = preflight_bank.simulate_transaction(&transaction, false) { match err { @@ -3912,6 +3915,12 @@ pub mod rpc_full { return_data: return_data.map(|return_data| return_data.into()), inner_instructions: None, replacement_blockhash: None, + fee, + pre_balances: None, + post_balances: None, + pre_token_balances: None, + post_token_balances: None, + loaded_addresses: None, }, } .into()); @@ -3993,6 +4002,11 @@ pub mod rpc_full { loaded_accounts_data_size, return_data, inner_instructions, + fee, + pre_balances, + post_balances, + pre_token_balances, + post_token_balances, } = bank.simulate_transaction(&transaction, enable_cpi_recording); let account_keys = transaction.message().account_keys(); @@ -4061,6 +4075,16 @@ pub mod rpc_full { return_data: return_data.map(|return_data| return_data.into()), inner_instructions, replacement_blockhash: blockhash, + fee, + pre_balances, + post_balances, + pre_token_balances: pre_token_balances.map(|balances| { + balances.into_iter().map(|balance| solana_ledger::transaction_balances::svm_token_info_to_token_balance(balance).into()).collect() + }), + post_token_balances: post_token_balances.map(|balances| { + balances.into_iter().map(|balance| solana_ledger::transaction_balances::svm_token_info_to_token_balance(balance).into()).collect() + }), + loaded_addresses: Some(UiLoadedAddresses::from(&transaction.get_loaded_addresses())), }, )) } @@ -4503,7 +4527,6 @@ pub mod tests { genesis_utils::{create_genesis_config, GenesisConfigInfo}, get_tmp_ledger_path, }, - solana_log_collector::ic_logger_msg, solana_message::{ v0::{self, MessageAddressTableLookup}, Message, MessageHeader, SimpleAddressLoader, VersionedMessage, @@ -4536,6 +4559,7 @@ pub mod tests { solana_sha256_hasher::hash, solana_signer::Signer, solana_svm::account_loader::TRANSACTION_ACCOUNT_BASE_SIZE, + solana_svm_log_collector::ic_logger_msg, solana_system_interface::{instruction as system_instruction, program as system_program}, solana_system_transaction as 
system_transaction, solana_sysvar::slot_hashes::SlotHashes, @@ -4546,7 +4570,7 @@ pub mod tests { EncodedConfirmedBlock, EncodedTransaction, EncodedTransactionWithStatusMeta, TransactionDetails, }, - solana_vote_interface::state::VoteState, + solana_vote_interface::state::VoteStateV3, solana_vote_program::{ vote_instruction, vote_state::{self, TowerSync, VoteInit, VoteStateVersions, MAX_LOCKOUT_HISTORY}, @@ -4649,15 +4673,9 @@ pub mod tests { ic_logger_msg!(log_collector, "I am logging from a builtin program!"); ic_logger_msg!(log_collector, "I am about to CPI to System!"); - let from_pubkey = *transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?; - let to_pubkey = *transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(1)?, - )?; - let owner_pubkey = *transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(2)?, - )?; + let from_pubkey = *instruction_context.get_key_of_instruction_account(0)?; + let to_pubkey = *instruction_context.get_key_of_instruction_account(1)?; + let owner_pubkey = *instruction_context.get_key_of_instruction_account(2)?; invoke_context.native_invoke( system_instruction::create_account( @@ -5006,10 +5024,10 @@ pub mod tests { bank } - fn store_vote_account(&self, vote_pubkey: &Pubkey, vote_state: VoteState) { + fn store_vote_account(&self, vote_pubkey: &Pubkey, vote_state: VoteStateV3) { let bank = self.working_bank(); - let versioned = VoteStateVersions::new_current(vote_state); - let space = VoteState::size_of(); + let versioned = VoteStateVersions::new_v3(vote_state); + let space = VoteStateV3::size_of(); let balance = bank.get_minimum_balance_for_rent_exemption(space); let mut vote_account = AccountSharedData::new(balance, space, &solana_vote_program::id()); @@ -6016,6 +6034,12 @@ pub mod tests { "err":null, "innerInstructions": null, 
"loadedAccountsDataSize": loaded_accounts_data_size, + "fee": 5000, + "loadedAddresses": {"readonly": [], "writable": []}, + "preBalances": [1000000000, 0, 1], + "postBalances": [999982200, 12800, 1], + "preTokenBalances": [], + "postTokenBalances": [], "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" @@ -6103,6 +6127,12 @@ pub mod tests { "err":null, "innerInstructions":null, "loadedAccountsDataSize": loaded_accounts_data_size, + "fee": 5000, + "loadedAddresses": {"readonly": [], "writable": []}, + "preBalances": [1000000000, 0, 1], + "postBalances": [999982200, 12800, 1], + "preTokenBalances": [], + "postTokenBalances": [], "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" @@ -6134,6 +6164,12 @@ pub mod tests { "err":null, "innerInstructions":null, "loadedAccountsDataSize": loaded_accounts_data_size, + "fee": 5000, + "loadedAddresses": {"readonly": [], "writable": []}, + "preBalances": [1000000000, 0, 1], + "postBalances": [999982200, 12800, 1], + "preTokenBalances": [], + "postTokenBalances": [], "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" @@ -6189,6 +6225,12 @@ pub mod tests { "accounts":null, "innerInstructions":null, "loadedAccountsDataSize":0, + "fee": null, + "loadedAddresses": {"readonly": [], "writable": []}, + "preBalances": [1000000000, 0, 1], + "postBalances": [1000000000, 0, 1], + "preTokenBalances": [], + "postTokenBalances": [], "logs":[], "replacementBlockhash": null, "returnData": null, @@ -6223,6 +6265,12 @@ pub mod tests { "err":null, "innerInstructions":null, "loadedAccountsDataSize": loaded_accounts_data_size, + "fee": 5000, + "loadedAddresses": {"readonly": [], "writable": []}, + "preBalances": [1000000000, 0, 1], + "postBalances": [999982200, 12800, 1], + "preTokenBalances": [], + "postTokenBalances": [], "logs":[ "Program 
11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" @@ -6374,6 +6422,12 @@ pub mod tests { "err": null, "innerInstructions": null, "loadedAccountsDataSize": loaded_accounts_data_size, + "fee": 5000, + "loadedAddresses": {"readonly": [], "writable": []}, + "preBalances": [1000000000, 29300, 1], + "postBalances": [999994999, 29301, 1], + "preTokenBalances": [], + "postTokenBalances": [], "logs":[ "Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success" @@ -6453,6 +6507,12 @@ pub mod tests { "err":null, "innerInstructions": null, "loadedAccountsDataSize": loaded_accounts_data_size, + "fee": 10000, + "loadedAddresses": {"readonly": [], "writable": []}, + "preBalances": [1000000000, 0, 1, 0, 1], + "postBalances": [999977200, 12800, 1, 0, 1], + "preTokenBalances": [], + "postTokenBalances": [], "logs":[ "Program TestProgram11111111111111111111111111111111 invoke [1]", "I am logging from a builtin program!", @@ -6496,6 +6556,12 @@ pub mod tests { "err":null, "innerInstructions": null, "loadedAccountsDataSize": loaded_accounts_data_size, + "fee": 10000, + "loadedAddresses": {"readonly": [], "writable": []}, + "preBalances": [1000000000, 0, 1, 0, 1], + "postBalances": [999977200, 12800, 1, 0, 1], + "preTokenBalances": [], + "postTokenBalances": [], "logs":[ "Program TestProgram11111111111111111111111111111111 invoke [1]", "I am logging from a builtin program!", @@ -6560,6 +6626,12 @@ pub mod tests { } ], "loadedAccountsDataSize": loaded_accounts_data_size, + "fee": 10000, + "loadedAddresses": {"readonly": [], "writable": []}, + "preBalances": [1000000000, 0, 1, 0, 1], + "postBalances": [999977200, 12800, 1, 0, 1], + "preTokenBalances": [], + "postTokenBalances": [], "logs":[ "Program TestProgram11111111111111111111111111111111 invoke [1]", "I am logging from a builtin program!", @@ -6820,7 +6892,7 @@ pub mod tests { assert_eq!( res, Some( - 
r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Blockhash not found","data":{"accounts":null,"err":"BlockhashNotFound","innerInstructions":null,"loadedAccountsDataSize":0,"logs":[],"replacementBlockhash":null,"returnData":null,"unitsConsumed":0}},"id":1}"#.to_string(), + r#"{"jsonrpc":"2.0","error":{"code":-32002,"message":"Transaction simulation failed: Blockhash not found","data":{"accounts":null,"err":"BlockhashNotFound","fee":null,"innerInstructions":null,"loadedAccountsDataSize":0,"loadedAddresses":null,"logs":[],"postBalances":null,"postTokenBalances":null,"preBalances":null,"preTokenBalances":null,"replacementBlockhash":null,"returnData":null,"unitsConsumed":0}},"id":1}"#.to_string(), ) ); @@ -7571,7 +7643,7 @@ pub mod tests { // Create a vote account with no stake. let alice_vote_keypair = Keypair::new(); - let alice_vote_state = VoteState::new( + let alice_vote_state = VoteStateV3::new( &VoteInit { node_pubkey: mint_keypair.pubkey(), authorized_voter: alice_vote_keypair.pubkey(), @@ -8809,7 +8881,7 @@ pub mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(2), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -8833,7 +8905,7 @@ pub mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(1), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -8857,7 +8929,7 @@ pub mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(3), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -8882,7 +8954,7 @@ pub mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::Frozen(bank3), - None, /* no work sequence */ + None, /* no dependency 
work */ ), &bank_forks, &optimistically_confirmed_bank, diff --git a/rpc/src/rpc_health.rs b/rpc/src/rpc_health.rs index 56684d8edbd802..fee2b2b2cf6fe5 100644 --- a/rpc/src/rpc_health.rs +++ b/rpc/src/rpc_health.rs @@ -20,7 +20,6 @@ pub struct RpcHealth { blockstore: Arc, health_check_slot_distance: u64, override_health_check: Arc, - startup_verification_complete: Arc, #[cfg(test)] stub_health_status: std::sync::RwLock>, } @@ -31,14 +30,12 @@ impl RpcHealth { blockstore: Arc, health_check_slot_distance: u64, override_health_check: Arc, - startup_verification_complete: Arc, ) -> Self { Self { optimistically_confirmed_bank, blockstore, health_check_slot_distance, override_health_check, - startup_verification_complete, #[cfg(test)] stub_health_status: std::sync::RwLock::new(None), } @@ -55,9 +52,6 @@ impl RpcHealth { if self.override_health_check.load(Ordering::Relaxed) { return RpcHealthStatus::Ok; } - if !self.startup_verification_complete.load(Ordering::Acquire) { - return RpcHealthStatus::Unknown; - } // A node can observe votes by both replaying blocks and observing gossip. 
// @@ -122,7 +116,6 @@ impl RpcHealth { blockstore, 42, Arc::new(AtomicBool::new(false)), - Arc::new(AtomicBool::new(true)), )) } @@ -160,25 +153,19 @@ pub mod tests { let health_check_slot_distance = 10; let override_health_check = Arc::new(AtomicBool::new(true)); - let startup_verification_complete = Arc::clone(bank0.get_startup_verification_complete()); let health = RpcHealth::new( optimistically_confirmed_bank.clone(), blockstore.clone(), health_check_slot_distance, override_health_check.clone(), - startup_verification_complete, ); // Override health check set to true - status is ok assert_eq!(health.check(), RpcHealthStatus::Ok); - // Remove the override - status now unknown with incomplete startup verification - override_health_check.store(false, Ordering::Relaxed); - assert_eq!(health.check(), RpcHealthStatus::Unknown); - - // Mark startup verification complete - status still unknown as no slots have been + // Remove the override - status now unknown as no slots have been // optimistically confirmed yet - bank0.set_initial_accounts_hash_verification_completed(); + override_health_check.store(false, Ordering::Relaxed); assert_eq!(health.check(), RpcHealthStatus::Unknown); // Mark slot 15 as being optimistically confirmed in the Blockstore, this could diff --git a/rpc/src/rpc_pubsub.rs b/rpc/src/rpc_pubsub.rs index 5d22eadcf52957..3cc83626c19d68 100644 --- a/rpc/src/rpc_pubsub.rs +++ b/rpc/src/rpc_pubsub.rs @@ -643,22 +643,20 @@ mod tests { }, }, solana_signer::Signer, - solana_stake_interface::{ - self as stake, instruction as stake_instruction, - state::{Authorized, Lockup, StakeAuthorize, StakeStateV2}, - }, - solana_stake_program::stake_state, solana_system_interface::{instruction as system_instruction, program as system_program}, solana_system_transaction as system_transaction, solana_transaction::Transaction, solana_vote::vote_transaction::VoteTransaction, - solana_vote_program::vote_state::Vote, + solana_vote_interface::{ + instruction::{self as 
vote_instruction, CreateVoteAccountConfig}, + program as vote_program, + state::{Vote, VoteInit, VoteStateV3}, + }, std::{ sync::{ atomic::{AtomicBool, AtomicU64}, RwLock, }, - thread::sleep, time::Duration, }, }; @@ -875,11 +873,10 @@ mod tests { genesis_config.rent = Rent::default(); activate_all_features(&mut genesis_config); - let new_stake_authority = solana_pubkey::new_rand(); - let stake_authority = Keypair::new(); + let validator = Keypair::new(); + let voter = Keypair::new(); let from = Keypair::new(); - let stake_account = Keypair::new(); - let stake_program_id = stake::program::id(); + let vote_account = Keypair::new(); let bank = Bank::new_for_tests(&genesis_config); let blockhash = bank.last_blockhash(); let bank_forks = BankForks::new_rw_arc(bank); @@ -902,7 +899,7 @@ mod tests { let encoding = UiAccountEncoding::Base64; rpc.account_subscribe( - stake_account.pubkey().to_string(), + vote_account.pubkey().to_string(), Some(RpcAccountInfoConfig { commitment: Some(CommitmentConfig::processed()), encoding: Some(encoding), @@ -913,24 +910,42 @@ mod tests { .unwrap(); rpc.block_until_processed(&rpc_subscriptions); - let balance = { + let (validator_balance, vote_balance) = { let bank = bank_forks.read().unwrap().working_bank(); let rent = &bank.rent_collector().rent; - rent.minimum_balance(StakeStateV2::size_of()) + ( + rent.minimum_balance(0), + rent.minimum_balance(VoteStateV3::size_of()), + ) }; + let balance = validator_balance + vote_balance; let tx = system_transaction::transfer(&alice, &from.pubkey(), balance, blockhash); process_transaction_and_notify(&bank_forks, &tx, &rpc_subscriptions, 1).unwrap(); - let authorized = Authorized::auto(&stake_authority.pubkey()); - let ixs = stake_instruction::create_account( + let mut ixs = vec![system_instruction::create_account( &from.pubkey(), - &stake_account.pubkey(), - &authorized, - &Lockup::default(), - balance, - ); + &validator.pubkey(), + validator_balance, + 0, + &system_program::id(), + )]; + 
ixs.append(&mut vote_instruction::create_account_with_config( + &from.pubkey(), + &vote_account.pubkey(), + &VoteInit { + node_pubkey: validator.pubkey(), + authorized_voter: voter.pubkey(), + authorized_withdrawer: Pubkey::new_unique(), + ..VoteInit::default() + }, + vote_balance, + CreateVoteAccountConfig { + space: VoteStateV3::size_of() as u64, + ..CreateVoteAccountConfig::default() + }, + )); let message = Message::new(&ixs, Some(&from.pubkey())); - let tx = Transaction::new(&[&from, &stake_account], message, blockhash); + let tx = Transaction::new(&[&from, &vote_account, &validator], message, blockhash); process_transaction_and_notify(&bank_forks, &tx, &rpc_subscriptions, 1).unwrap(); // Test signature confirmation notification #1 @@ -939,7 +954,7 @@ mod tests { .unwrap() .get(1) .unwrap() - .get_account(&stake_account.pubkey()) + .get_account(&vote_account.pubkey()) .unwrap(); let expected_data = account.data(); let expected = json!({ @@ -949,8 +964,8 @@ mod tests { "result": { "context": { "slot": 1 }, "value": { - "owner": stake_program_id.to_string(), - "lamports": balance, + "owner": vote_program::id().to_string(), + "lamports": vote_balance, "data": [BASE64_STANDARD.encode(expected_data), encoding], "executable": false, "rentEpoch": u64::MAX, @@ -966,34 +981,6 @@ mod tests { expected, serde_json::from_str::(&response).unwrap(), ); - - let balance = { - let bank = bank_forks.read().unwrap().working_bank(); - let rent = &bank.rent_collector().rent; - rent.minimum_balance(0) - }; - let tx = - system_transaction::transfer(&alice, &stake_authority.pubkey(), balance, blockhash); - process_transaction_and_notify(&bank_forks, &tx, &rpc_subscriptions, 1).unwrap(); - sleep(Duration::from_millis(200)); - let ix = stake_instruction::authorize( - &stake_account.pubkey(), - &stake_authority.pubkey(), - &new_stake_authority, - StakeAuthorize::Staker, - None, - ); - let message = Message::new(&[ix], Some(&stake_authority.pubkey())); - let tx = 
Transaction::new(&[&stake_authority], message, blockhash); - process_transaction_and_notify(&bank_forks, &tx, &rpc_subscriptions, 1).unwrap(); - sleep(Duration::from_millis(200)); - - let bank = bank_forks.read().unwrap()[1].clone(); - let account = bank.get_account(&stake_account.pubkey()).unwrap(); - assert_eq!( - stake_state::authorized_from(&account).unwrap().staker, - new_stake_authority - ); } #[test] diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index 5532558a0683fc..de93e3765dac36 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -37,7 +37,7 @@ pub const DEFAULT_TEST_QUEUE_CAPACITY_ITEMS: usize = 100; pub const DEFAULT_QUEUE_CAPACITY_BYTES: usize = 256 * 1024 * 1024; pub const DEFAULT_WORKER_THREADS: usize = 1; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct PubSubConfig { pub enable_block_subscription: bool, pub enable_vote_subscription: bool, @@ -87,7 +87,6 @@ impl PubSubService { pubsub_addr: SocketAddr, ) -> (Trigger, Self) { let subscription_control = subscriptions.control().clone(); - info!("rpc_pubsub bound to {pubsub_addr:?}"); let (trigger, tripwire) = Tripwire::new(); let thread_hdl = Builder::new() @@ -448,7 +447,19 @@ async fn listen( subscription_control: SubscriptionControl, mut tripwire: Tripwire, ) -> io::Result<()> { - let listener = tokio::net::TcpListener::bind(&listen_address).await?; + let listener = match tokio::net::TcpListener::bind(&listen_address).await { + Ok(listener) => { + info!("rpc_pubsub listening on {listen_address:?}"); + listener + } + Err(e) => { + error!( + "failed to bind rpc_pubsub listener on {listen_address:?}: {e}. Hint: is the port \ + already in use?" + ); + return Err(e); + } + }; let counter = TokenCounter::new("rpc_pubsub_connections"); loop { select! 
{ diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 62e4742ee7cfca..a2d73e38d192c9 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -16,18 +16,20 @@ use { RequestMiddlewareAction, ServerBuilder, }, regex::Regex, - solana_client::connection_cache::{ConnectionCache, Protocol}, + solana_cli_output::display::build_balance_message, + solana_client::{ + client_option::ClientOption, + connection_cache::{ConnectionCache, Protocol}, + }, solana_genesis_config::DEFAULT_GENESIS_DOWNLOAD_PATH, solana_gossip::cluster_info::ClusterInfo, solana_hash::Hash, - solana_keypair::Keypair, solana_ledger::{ bigtable_upload::ConfirmedBlockUploadConfig, bigtable_upload_service::BigTableUploadService, blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, }, solana_metrics::inc_new_counter_info, - solana_native_token::lamports_to_sol, solana_perf::thread::renice_this_thread, solana_poh::poh_recorder::PohRecorder, solana_quic_definitions::NotifyKeyUpdate, @@ -48,7 +50,7 @@ use { solana_storage_bigtable::CredentialType, solana_validator_exit::Exit, std::{ - net::{SocketAddr, UdpSocket}, + net::SocketAddr, path::{Path, PathBuf}, pin::Pin, sync::{ @@ -59,11 +61,10 @@ use { thread::{self, Builder, JoinHandle}, time::{Duration, Instant}, }, - tokio::runtime::{Builder as TokioBuilder, Handle as RuntimeHandle, Runtime as TokioRuntime}, + tokio::runtime::{Builder as TokioBuilder, Runtime as TokioRuntime}, tokio_util::{ bytes::Bytes, codec::{BytesCodec, FramedRead}, - sync::CancellationToken, }, }; @@ -434,14 +435,14 @@ async fn handle_rest(bank_forks: &Arc>, path: &str) -> Option< let bank = bank_forks.read().unwrap().root_bank(); let supply_result = calculate_circulating_supply_async(&bank).await; match supply_result { - Ok(supply) => Some(format!("{}", lamports_to_sol(supply))), + Ok(supply) => Some(build_balance_message(supply, false, false)), Err(_) => None, } } "/v0/total-supply" => { let bank = bank_forks.read().unwrap().root_bank(); let 
total_supply = bank.capitalization(); - Some(format!("{}", lamports_to_sol(total_supply))) + Some(build_balance_message(total_supply, false, false)) } _ => None, } @@ -483,7 +484,6 @@ pub struct JsonRpcServiceConfig<'a> { pub validator_exit: Arc>, pub exit: Arc, pub override_health_check: Arc, - pub startup_verification_complete: Arc, pub optimistically_confirmed_bank: Arc>, pub send_transaction_service_config: send_transaction_service::Config, pub max_slots: Arc, @@ -493,17 +493,6 @@ pub struct JsonRpcServiceConfig<'a> { pub client_option: ClientOption<'a>, } -/// [`ClientOption`] enum represents the available client types for TPU -/// communication: -/// * [`ConnectionCacheClient`]: Uses a shared [`ConnectionCache`] to manage -/// connections efficiently. -/// * [`TpuClientNextClient`]: Relies on the `tpu-client-next` crate and -/// requires a reference to a [`Keypair`]. -pub enum ClientOption<'a> { - ConnectionCache(Arc), - TpuClientNext(&'a Keypair, UdpSocket, RuntimeHandle, CancellationToken), -} - impl JsonRpcService { pub fn new_with_config(config: JsonRpcServiceConfig) -> Result { let runtime = service_runtime( @@ -545,7 +534,6 @@ impl JsonRpcService { config.validator_exit, config.exit, config.override_health_check, - config.startup_verification_complete, config.optimistically_confirmed_bank, config.send_transaction_service_config, config.max_slots, @@ -595,7 +583,6 @@ impl JsonRpcService { config.validator_exit, config.exit, config.override_health_check, - config.startup_verification_complete, config.optimistically_confirmed_bank, config.send_transaction_service_config, config.max_slots, @@ -625,7 +612,6 @@ impl JsonRpcService { validator_exit: Arc>, exit: Arc, override_health_check: Arc, - startup_verification_complete: Arc, optimistically_confirmed_bank: Arc>, send_transaction_service_config: send_transaction_service::Config, max_slots: Arc, @@ -672,7 +658,6 @@ impl JsonRpcService { validator_exit, exit, override_health_check, - 
startup_verification_complete, optimistically_confirmed_bank, send_transaction_service_config, max_slots, @@ -706,7 +691,6 @@ impl JsonRpcService { validator_exit: Arc>, exit: Arc, override_health_check: Arc, - startup_verification_complete: Arc, optimistically_confirmed_bank: Arc>, send_transaction_service_config: send_transaction_service::Config, max_slots: Arc, @@ -725,7 +709,6 @@ impl JsonRpcService { Arc::clone(&blockstore), config.health_check_slot_distance, override_health_check, - startup_verification_complete, )); let largest_accounts_cache = Arc::new(RwLock::new(LargestAccountsCache::new( @@ -956,7 +939,8 @@ mod tests { use { super::*, crate::rpc::{create_validator_exit, tests::new_test_cluster_info}, - solana_genesis_config::{ClusterType, DEFAULT_GENESIS_ARCHIVE}, + solana_cluster_type::ClusterType, + solana_genesis_config::DEFAULT_GENESIS_ARCHIVE, solana_ledger::{ genesis_utils::{create_genesis_config, GenesisConfigInfo}, get_tmp_ledger_path_auto_delete, @@ -1009,7 +993,6 @@ mod tests { validator_exit, exit, Arc::new(AtomicBool::new(false)), - Arc::new(AtomicBool::new(true)), optimistically_confirmed_bank, send_transaction_service::Config { retry_rate_ms: 1000, diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 28be1d239d316c..7b6035030468f0 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -1992,7 +1992,7 @@ pub(crate) mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(3), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -2049,7 +2049,7 @@ pub(crate) mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::Frozen(bank3), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -2172,7 +2172,7 @@ pub(crate) mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( 
BankNotification::OptimisticallyConfirmed(3), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -2291,7 +2291,7 @@ pub(crate) mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(3), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -2350,7 +2350,7 @@ pub(crate) mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::Frozen(bank3), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -2800,7 +2800,7 @@ pub(crate) mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::OptimisticallyConfirmed(1), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, @@ -2859,7 +2859,7 @@ pub(crate) mod tests { OptimisticallyConfirmedBankTracker::process_notification( ( BankNotification::Frozen(bank2), - None, /* no work sequence */ + None, /* no dependency work */ ), &bank_forks, &optimistically_confirmed_bank, diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index a903a2d473aad5..a2bca34220c867 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -51,14 +51,14 @@ const TSS_TEST_QUIESCE_SLEEP_TIME_MS: u64 = 50; pub struct TransactionStatusService { thread_hdl: JoinHandle<()>, #[cfg(feature = "dev-context-only-utils")] - transaction_status_receiver: Arc>, + transaction_status_receiver: Receiver, } impl TransactionStatusService { const SERVICE_NAME: &str = "TransactionStatusService"; pub fn new( - write_transaction_status_receiver: Receiver, + transaction_status_receiver: Receiver, max_complete_transaction_status_slot: Arc, enable_rpc_transaction_history: bool, transaction_notifier: Option, @@ -67,49 +67,49 @@ impl 
TransactionStatusService { depenency_tracker: Option>, exit: Arc, ) -> Self { - let transaction_status_receiver = Arc::new(write_transaction_status_receiver); - let transaction_status_receiver_handle = Arc::clone(&transaction_status_receiver); - let thread_hdl = Builder::new() .name("solTxStatusWrtr".to_string()) - .spawn(move || { - info!("{} has started", Self::SERVICE_NAME); - loop { - if exit.load(Ordering::Relaxed) { - break; - } - - let message = match transaction_status_receiver_handle - .recv_timeout(Duration::from_secs(1)) - { - Ok(message) => message, - Err(err @ RecvTimeoutError::Disconnected) => { - info!("{} is stopping because: {err}", Self::SERVICE_NAME); + .spawn({ + let transaction_status_receiver = transaction_status_receiver.clone(); + move || { + info!("{} has started", Self::SERVICE_NAME); + loop { + if exit.load(Ordering::Relaxed) { break; } - Err(RecvTimeoutError::Timeout) => { - continue; - } - }; - match Self::write_transaction_status_batch( - message, - &max_complete_transaction_status_slot, - enable_rpc_transaction_history, - transaction_notifier.clone(), - &blockstore, - enable_extended_tx_metadata_storage, - depenency_tracker.clone(), - ) { - Ok(_) => {} - Err(err) => { - error!("{} is stopping because: {err}", Self::SERVICE_NAME); - exit.store(true, Ordering::Relaxed); - break; + let message = match transaction_status_receiver + .recv_timeout(Duration::from_secs(1)) + { + Ok(message) => message, + Err(err @ RecvTimeoutError::Disconnected) => { + info!("{} is stopping because: {err}", Self::SERVICE_NAME); + break; + } + Err(RecvTimeoutError::Timeout) => { + continue; + } + }; + + match Self::write_transaction_status_batch( + message, + &max_complete_transaction_status_slot, + enable_rpc_transaction_history, + transaction_notifier.clone(), + &blockstore, + enable_extended_tx_metadata_storage, + depenency_tracker.clone(), + ) { + Ok(_) => {} + Err(err) => { + error!("{} is stopping because: {err}", Self::SERVICE_NAME); + exit.store(true, 
Ordering::Relaxed); + break; + } } } + info!("{} has stopped", Self::SERVICE_NAME); } - info!("{} has stopped", Self::SERVICE_NAME); }) .unwrap(); Self { @@ -139,7 +139,7 @@ impl TransactionStatusService { costs, transaction_indexes, }, - work_sequence, + work_id, )) => { let mut status_and_memos_batch = blockstore.get_write_batch()?; @@ -256,8 +256,8 @@ impl TransactionStatusService { } if let Some(dependency_tracker) = dependency_tracker.as_ref() { - if let Some(work_sequence) = work_sequence { - dependency_tracker.mark_this_and_all_previous_work_processed(work_sequence); + if let Some(work_id) = work_id { + dependency_tracker.mark_this_and_all_previous_work_processed(work_id); } } } @@ -463,6 +463,7 @@ pub(crate) mod tests { executed_units: 0, fee_details: FeeDetails::default(), loaded_account_stats: TransactionLoadedAccountsStats::default(), + fee_payer_post_balance: 0, }); let balances = TransactionBalancesSet { @@ -529,7 +530,7 @@ pub(crate) mod tests { transaction_status_sender .send(TransactionStatusMessage::Batch(( transaction_status_batch, - None, /* No work sequence */ + None, /* No work id */ ))) .unwrap(); @@ -593,6 +594,7 @@ pub(crate) mod tests { executed_units: 0, fee_details: FeeDetails::default(), loaded_account_stats: TransactionLoadedAccountsStats::default(), + fee_payer_post_balance: 0, }); let balances = TransactionBalancesSet { @@ -633,11 +635,11 @@ pub(crate) mod tests { Some(dependency_tracker.clone()), exit.clone(), ); - let work_sequence = 345; + let work_id = 345; transaction_status_sender .send(TransactionStatusMessage::Batch(( transaction_status_batch, - Some(work_sequence), + Some(work_id), ))) .unwrap(); transaction_status_service.quiesce_and_join_for_tests(exit); diff --git a/run.sh b/run.sh deleted file mode 100755 index 9f17e60f59b1d4..00000000000000 --- a/run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -cat <<'EOF' - - WARNING! LEGACY SHELL SCRIPT - - You almost certainly do not want to run this script! 
- - If you are a dapp developer and looking for a way to run a local validator, please - see https://docs.solanalabs.com/cli/examples/test-validator - - If you are a prospective validator, please see https://docs.solanalabs.com/operations - - If you are a core developer, many apologies for what you're about to endure, but - you may be in the right place. This script is now located at `./scripts/run.sh`. - Please update whatever docs lead you here to reflect this change - -EOF diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index ad3d5acc8457ad..09f6ce39a4717b 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -19,6 +19,7 @@ name = "solana_runtime" [features] dev-context-only-utils = [ "dep:solana-system-program", + "solana-accounts-db/dev-context-only-utils", "solana-svm/dev-context-only-utils", "solana-runtime-transaction/dev-context-only-utils", "solana-vote/dev-context-only-utils", @@ -28,16 +29,17 @@ frozen-abi = [ "dep:solana-frozen-abi-macro", "solana-account/frozen-abi", "solana-accounts-db/frozen-abi", + "solana-bls-signatures/frozen-abi", "solana-compute-budget/frozen-abi", "solana-cost-model/frozen-abi", "solana-epoch-schedule/frozen-abi", "solana-hard-forks/frozen-abi", "solana-inflation/frozen-abi", "solana-instruction/frozen-abi", + "solana-instruction-error/frozen-abi", "solana-perf/frozen-abi", "solana-program-runtime/frozen-abi", "solana-rent/frozen-abi", - "solana-rent-collector/frozen-abi", "solana-stake-interface/frozen-abi", "solana-svm/frozen-abi", "solana-transaction-error/frozen-abi", @@ -82,6 +84,7 @@ qualifier_attr = { workspace = true } rand = { workspace = true } rayon = { workspace = true } regex = { workspace = true } +semver = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } serde_json = { workspace = true } @@ -90,11 +93,13 @@ solana-account = { workspace = true } solana-account-info = { workspace = true } solana-accounts-db = { workspace = true } 
solana-address-lookup-table-interface = { workspace = true } +solana-bls-signatures = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-bucket-map = { workspace = true } solana-builtins = { workspace = true } solana-client-traits = { workspace = true } solana-clock = { workspace = true } +solana-cluster-type = { workspace = true } solana-commitment-config = { workspace = true } solana-compute-budget = { workspace = true } solana-compute-budget-instruction = { workspace = true } @@ -105,7 +110,7 @@ solana-ed25519-program = { workspace = true } solana-epoch-info = { workspace = true } solana-epoch-rewards-hasher = { workspace = true } solana-epoch-schedule = { workspace = true } -solana-feature-gate-interface = { workspace = true } +solana-feature-gate-interface = { workspace = true, features = ["bincode"] } solana-fee = { workspace = true } solana-fee-calculator = { workspace = true } solana-fee-structure = { workspace = true, features = ["serde"] } @@ -139,7 +144,6 @@ solana-program-runtime = { workspace = true, features = ["metrics"] } solana-pubkey = { workspace = true, features = ["rand"] } solana-rayon-threadlimit = { workspace = true } solana-rent = { workspace = true } -solana-rent-collector = { workspace = true, features = ["serde"] } solana-reward-info = { workspace = true } solana-runtime-transaction = { workspace = true } solana-sdk-ids = { workspace = true } @@ -155,6 +159,7 @@ solana-stake-interface = { workspace = true } solana-stake-program = { workspace = true } solana-svm = { workspace = true } solana-svm-callback = { workspace = true } +solana-svm-timings = { workspace = true } solana-svm-transaction = { workspace = true } solana-system-interface = { workspace = true } solana-system-program = { workspace = true, optional = true } @@ -162,7 +167,6 @@ solana-system-transaction = { workspace = true } solana-sysvar = { workspace = true } solana-sysvar-id = { workspace = true } solana-time-utils = { workspace = true } 
-solana-timings = { workspace = true } solana-transaction = { workspace = true, features = ["verify"] } solana-transaction-context = { workspace = true } solana-transaction-error = { workspace = true } @@ -191,14 +195,18 @@ rand0-7 = { package = "rand", version = "0.7" } rand_chacha = { workspace = true } solana-accounts-db = { workspace = true, features = ["dev-context-only-utils"] } solana-builtins = { workspace = true, features = ["dev-context-only-utils"] } +solana-instruction-error = { workspace = true } solana-logger = { workspace = true } +solana-program-binaries = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-runtime = { path = ".", features = ["dev-context-only-utils"] } solana-runtime-transaction = { workspace = true, features = [ "dev-context-only-utils", ] } solana-sdk-ids = { workspace = true } +solana-secp256k1-program = { workspace = true, features = ["bincode"] } solana-signature = { workspace = true, features = ["std"] } +solana-stake-interface = { workspace = true, features = ["sysvar"] } solana-svm = { workspace = true, features = ["dev-context-only-utils"] } solana-transaction-context = { workspace = true, features = [ "dev-context-only-utils", diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs deleted file mode 100644 index 226b9fc8003f68..00000000000000 --- a/runtime/benches/bank.rs +++ /dev/null @@ -1,200 +0,0 @@ -#![feature(test)] -#![allow(clippy::arithmetic_side_effects)] - -extern crate test; - -use { - log::*, - solana_client_traits::{AsyncClient, SyncClient}, - solana_clock::MAX_RECENT_BLOCKHASHES, - solana_genesis_config::create_genesis_config, - solana_hash::Hash, - solana_keypair::Keypair, - solana_message::Message, - solana_program_runtime::declare_process_instruction, - solana_pubkey::Pubkey, - solana_runtime::{ - bank::{test_utils::goto_end_of_slot, *}, - bank_client::BankClient, - loader_utils::create_invoke_instruction, - }, - solana_signer::Signer, - 
solana_svm::transaction_processing_callback::TransactionProcessingCallback, - solana_transaction::Transaction, - std::{sync::Arc, thread::sleep, time::Duration}, - test::Bencher, -}; - -const BUILTIN_PROGRAM_ID: [u8; 32] = [ - 98, 117, 105, 108, 116, 105, 110, 95, 112, 114, 111, 103, 114, 97, 109, 95, 105, 100, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -]; - -const NOOP_PROGRAM_ID: [u8; 32] = [ - 98, 117, 105, 108, 116, 105, 110, 95, 112, 114, 111, 103, 114, 97, 109, 95, 105, 100, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -]; - -pub fn create_builtin_transactions( - bank_client: &BankClient, - mint_keypair: &Keypair, -) -> Vec<(Transaction, Hash)> { - let program_id = Pubkey::from(BUILTIN_PROGRAM_ID); - - (0..4096) - .map(|_| { - // Seed the signer account - let rando0 = Keypair::new(); - bank_client - .transfer_and_confirm(10_000, mint_keypair, &rando0.pubkey()) - .unwrap_or_else(|_| panic!("{}:{}", line!(), file!())); - - let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8); - let blockhash = bank_client.get_latest_blockhash().unwrap(); - let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); - let message_hash = message.hash(); - let tx = Transaction::new(&[&rando0], message, blockhash); - (tx, message_hash) - }) - .collect() -} - -pub fn create_native_loader_transactions( - bank_client: &BankClient, - mint_keypair: &Keypair, -) -> Vec<(Transaction, Hash)> { - let program_id = Pubkey::from(NOOP_PROGRAM_ID); - - (0..4096) - .map(|_| { - // Seed the signer account©41 - let rando0 = Keypair::new(); - bank_client - .transfer_and_confirm(10_000, mint_keypair, &rando0.pubkey()) - .unwrap_or_else(|_| panic!("{}:{}", line!(), file!())); - - let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8); - let blockhash = bank_client.get_latest_blockhash().unwrap(); - let message = Message::new(&[instruction], Some(&mint_keypair.pubkey())); - let message_hash = message.hash(); - let tx = 
Transaction::new(&[&rando0], message, blockhash); - (tx, message_hash) - }) - .collect() -} - -fn sync_bencher(bank: &Bank, _bank_client: &BankClient, transactions: &[(Transaction, Hash)]) { - let results = bank.process_transactions(transactions.iter().map(|(tx, _)| tx)); - assert!(results.iter().all(Result::is_ok)); -} - -fn async_bencher(bank: &Bank, bank_client: &BankClient, transactions: &[(Transaction, Hash)]) { - for (transaction, _hash) in transactions.iter().cloned() { - bank_client.async_send_transaction(transaction).unwrap(); - } - let (last_transaction, last_tx_hash) = transactions.last().unwrap(); - for _ in 0..1_000_000_000_u64 { - if let Some((_slot, status)) = bank.get_committed_transaction_status_and_slot( - last_tx_hash, - &last_transaction.message.recent_blockhash, - ) { - if !status { - panic!("transaction failed"); - } - break; - } - sleep(Duration::from_nanos(1)); - } -} - -#[allow(clippy::type_complexity)] -fn do_bench_transactions( - bencher: &mut Bencher, - bench_work: &dyn Fn(&Bank, &BankClient, &[(Transaction, Hash)]), - create_transactions: &dyn Fn(&BankClient, &Keypair) -> Vec<(Transaction, Hash)>, -) { - solana_logger::setup(); - let ns_per_s = 1_000_000_000; - let (mut genesis_config, mint_keypair) = create_genesis_config(100_000_000_000_000); - genesis_config.ticks_per_slot = 100; - - let bank = Bank::new_for_benches(&genesis_config); - // freeze bank so that slot hashes is populated - bank.freeze(); - - declare_process_instruction!(MockBuiltin, 1, |_invoke_context| { - // Do nothing - Ok(()) - }); - - let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), 1); - bank.add_mockup_builtin(Pubkey::from(BUILTIN_PROGRAM_ID), MockBuiltin::vm); - bank.add_builtin_account("solana_noop_program", &Pubkey::from(NOOP_PROGRAM_ID)); - let bank = Arc::new(bank); - let bank_client = BankClient::new_shared(bank.clone()); - let transactions = create_transactions(&bank_client, &mint_keypair); - - // Do once to fund accounts, load 
modules, etc... - let results = bank.process_transactions(transactions.iter().map(|(tx, _)| tx)); - assert!(results.iter().all(Result::is_ok)); - - bencher.iter(|| { - // Since bencher runs this multiple times, we need to clear the signatures. - bank.clear_signatures(); - bench_work(&bank, &bank_client, &transactions); - }); - - let summary = bencher.bench(|_bencher| Ok(())).unwrap().unwrap(); - info!(" {:?} transactions", transactions.len()); - info!(" {:?} ns/iter median", summary.median as u64); - assert!(0f64 != summary.median); - let tps = transactions.len() as u64 * (ns_per_s / summary.median as u64); - info!(" {:?} TPS", tps); -} - -#[bench] -#[ignore] -fn bench_bank_sync_process_builtin_transactions(bencher: &mut Bencher) { - do_bench_transactions(bencher, &sync_bencher, &create_builtin_transactions); -} - -#[bench] -#[ignore] -fn bench_bank_sync_process_native_loader_transactions(bencher: &mut Bencher) { - do_bench_transactions(bencher, &sync_bencher, &create_native_loader_transactions); -} - -#[bench] -#[ignore] -fn bench_bank_async_process_builtin_transactions(bencher: &mut Bencher) { - do_bench_transactions(bencher, &async_bencher, &create_builtin_transactions); -} - -#[bench] -#[ignore] -fn bench_bank_async_process_native_loader_transactions(bencher: &mut Bencher) { - do_bench_transactions(bencher, &async_bencher, &create_native_loader_transactions); -} - -#[bench] -#[ignore] -fn bench_bank_update_recent_blockhashes(bencher: &mut Bencher) { - let (genesis_config, _mint_keypair) = create_genesis_config(100); - let mut bank = Arc::new(Bank::new_for_benches(&genesis_config)); - goto_end_of_slot(bank.clone()); - let genesis_hash = bank.last_blockhash(); - // Prime blockhash_queue - for i in 0..(MAX_RECENT_BLOCKHASHES + 1) { - bank = Arc::new(Bank::new_from_parent( - bank, - &Pubkey::default(), - (i + 1) as u64, - )); - goto_end_of_slot(bank.clone()); - } - // Verify blockhash_queue is full (genesis hash has been kicked out) - 
assert!(!bank.is_hash_valid_for_age(&genesis_hash, MAX_RECENT_BLOCKHASHES)); - bencher.iter(|| { - bank.update_recent_blockhashes(); - }); -} diff --git a/runtime/benches/status_cache.rs b/runtime/benches/status_cache.rs index 6b60e0e0892a16..d477c8923924e6 100644 --- a/runtime/benches/status_cache.rs +++ b/runtime/benches/status_cache.rs @@ -35,6 +35,20 @@ fn bench_status_cache_serialize(bencher: &mut Bencher) { }); } +#[bench] +fn bench_status_cache_serialize_max(bencher: &mut Bencher) { + // Fill up the status cache to better match what intense runtime usage would + // look like. + let max_cache_entries = MAX_CACHE_ENTRIES as u64; + let mut status_cache = BankStatusCache::default(); + fill_status_cache(&mut status_cache, max_cache_entries, 100_000); + + assert!(status_cache.roots().contains(&0)); + bencher.iter(|| { + let _ = serialize(&status_cache.root_slot_deltas()).unwrap(); + }); +} + #[bench] fn bench_status_cache_root_slot_deltas(bencher: &mut Bencher) { let mut status_cache = BankStatusCache::default(); diff --git a/runtime/src/account_saver.rs b/runtime/src/account_saver.rs index 011e21d4cc6836..c89ff51efbf745 100644 --- a/runtime/src/account_saver.rs +++ b/runtime/src/account_saver.rs @@ -11,7 +11,7 @@ use { }, solana_svm_transaction::svm_message::SVMMessage, solana_transaction::sanitized::SanitizedTransaction, - solana_transaction_context::TransactionAccount, + solana_transaction_context::transaction_accounts::TransactionAccount, }; // Used to approximate how many accounts will be calculated for storage so that diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 74768a71ed9b88..e1bb80a0b1c9e3 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -234,9 +234,6 @@ impl SnapshotRequestHandler { enqueued: _, } = snapshot_request; - // we should not rely on the state of this validator until startup verification is complete - 
assert!(snapshot_root_bank.has_initial_accounts_hash_verification_completed()); - if snapshot_kind.is_full_snapshot() { // The latest full snapshot slot is what accounts-db uses to properly handle // zero lamport accounts. We are handling a full snapshot request here, and @@ -368,7 +365,7 @@ impl PrunedBanksRequestHandler { // Purge all the slots in parallel // Banks for the same slot are purged sequentially let accounts_db = bank.rc.accounts.accounts_db.as_ref(); - accounts_db.thread_pool_clean.install(|| { + accounts_db.thread_pool_background.install(|| { grouped_banks_to_purge.into_par_iter().for_each(|group| { group.iter().for_each(|(slot, bank_id)| { accounts_db.purge_slot(*slot, *bank_id, true); @@ -436,7 +433,7 @@ impl AccountsBackgroundService { let mut removed_slots_count = 0; let mut total_remove_slots_time = 0; let t_background = Builder::new() - .name("solBgAccounts".to_string()) + .name("solAcctsBgSvc".to_string()) .spawn({ let is_running = is_running.clone(); let stop = stop.clone(); @@ -491,15 +488,8 @@ impl AccountsBackgroundService { // before setting a root `R > N`, and // snapshot_request_handler.handle_requests() will always look for the // latest available snapshot in the channel. - // - // NOTE: We must wait for startup verification to complete before handling - // snapshot requests. This is because startup verification and snapshot - // request handling can both kick off accounts hash calculations in - // background threads, and these must not happen concurrently. 
- let snapshot_handle_result = bank - .has_initial_accounts_hash_verification_completed() - .then(|| request_handlers.handle_snapshot_requests(non_snapshot_time)) - .flatten(); + let snapshot_handle_result = + request_handlers.handle_snapshot_requests(non_snapshot_time); if let Some(snapshot_handle_result) = snapshot_handle_result { // Safe, see proof above @@ -509,11 +499,8 @@ impl AccountsBackgroundService { Ok(snapshot_slot) => { assert!( last_cleaned_slot <= snapshot_slot, - "last cleaned slot: {last_cleaned_slot}, \ - snapshot request slot: {snapshot_slot}, \ - is startup verification complete: {}, \ - enqueued snapshot requests: {:?}", - bank.has_initial_accounts_hash_verification_completed(), + "last cleaned slot: {last_cleaned_slot}, snapshot request \ + slot: {snapshot_slot}, enqueued snapshot requests: {:?}", request_handlers .snapshot_request_handler .snapshot_request_receiver @@ -526,8 +513,8 @@ impl AccountsBackgroundService { } Err(err) => { error!( - "Stopping AccountsBackgroundService! \ - Fatal error while handling snapshot requests: {err}", + "Stopping AccountsBackgroundService! Fatal error while \ + handling snapshot requests: {err}", ); exit.store(true, Ordering::Relaxed); break; @@ -813,7 +800,6 @@ mod test { genesis_config_info.genesis_config.epoch_schedule = EpochSchedule::custom(SLOTS_PER_EPOCH, SLOTS_PER_EPOCH, false); let mut bank = Arc::new(Bank::new_for_tests(&genesis_config_info.genesis_config)); - bank.set_initial_accounts_hash_verification_completed(); // We need to get and set accounts-db's latest full snapshot slot to test // get_next_snapshot_request(). 
To workaround potential borrowing issues @@ -909,7 +895,6 @@ mod test { }; let genesis_config_info = create_genesis_config(10); let bank = Bank::new_for_tests(&genesis_config_info.genesis_config); - bank.set_initial_accounts_hash_verification_completed(); bank.rc.accounts.accounts_db.enable_bank_drop_callback(); bank.set_callback(Some(Box::new(SendDroppedBankCallback::new( pruned_banks_sender, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 4a550fd50e3dfd..1e597e764084f2 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -38,12 +38,13 @@ use { account_saver::collect_accounts_to_store, bank::{ metrics::*, - partitioned_epoch_rewards::{EpochRewardStatus, StakeRewards, VoteRewardsAccounts}, + partitioned_epoch_rewards::{EpochRewardStatus, VoteRewardsAccounts}, }, bank_forks::BankForks, epoch_stakes::{NodeVoteAccounts, VersionedEpochStakes}, inflation_rewards::points::InflationPointCalculationEvent, installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock}, + rent_collector::RentCollector, runtime_config::RuntimeConfig, snapshot_hash::SnapshotHash, stake_account::StakeAccount, @@ -62,11 +63,11 @@ use { agave_syscalls::{ create_program_runtime_environment_v1, create_program_runtime_environment_v2, }, - ahash::{AHashSet, RandomState}, + ahash::AHashSet, dashmap::DashMap, log::*, partitioned_epoch_rewards::PartitionedRewardsCalculation, - rayon::ThreadPoolBuilder, + rayon::{ThreadPool, ThreadPoolBuilder}, serde::Serialize, solana_account::{ create_account_shared_data_with_fields as create_account, from_account, Account, @@ -75,7 +76,7 @@ use { solana_accounts_db::{ account_locks::validate_account_locks, accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot}, - accounts_db::{self, AccountStorageEntry, AccountsDb, AccountsDbConfig, DuplicatesLtHash}, + accounts_db::{AccountStorageEntry, AccountsDb, AccountsDbConfig}, accounts_hash::AccountsLtHash, accounts_index::{IndexKey, ScanConfig, ScanResult}, 
accounts_update_notifier_interface::AccountsUpdateNotifier, @@ -83,11 +84,12 @@ use { blockhash_queue::BlockhashQueue, storable_accounts::StorableAccounts, }, - solana_builtins::{prototype::BuiltinPrototype, BUILTINS, STATELESS_BUILTINS}, + solana_builtins::{BUILTINS, STATELESS_BUILTINS}, solana_clock::{ BankId, Epoch, Slot, SlotIndex, UnixTimestamp, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY, }, + solana_cluster_type::ClusterType, solana_compute_budget::compute_budget::ComputeBudget, solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions, solana_cost_model::{block_cost_limits::simd_0286_block_limits, cost_tracker::CostTracker}, @@ -97,13 +99,13 @@ use { solana_fee::FeeFeatures, solana_fee_calculator::FeeRateGovernor, solana_fee_structure::{FeeBudgetLimits, FeeDetails, FeeStructure}, - solana_genesis_config::{ClusterType, GenesisConfig}, + solana_genesis_config::GenesisConfig, solana_hard_forks::HardForks, solana_hash::Hash, solana_inflation::Inflation, solana_keypair::Keypair, solana_lattice_hash::lt_hash::LtHash, - solana_measure::{meas_dur, measure::Measure, measure_time, measure_us}, + solana_measure::{measure::Measure, measure_time, measure_us}, solana_message::{inner_instruction::InnerInstructions, AccountKeys, SanitizedMessage}, solana_native_token::LAMPORTS_PER_SOL, solana_packet::PACKET_DATA_SIZE, @@ -111,8 +113,7 @@ use { solana_program_runtime::{ invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, }, - solana_pubkey::Pubkey, - solana_rent_collector::RentCollector, + solana_pubkey::{Pubkey, PubkeyHasherBuilder}, solana_reward_info::RewardInfo, solana_runtime_transaction::{ runtime_transaction::RuntimeTransaction, transaction_with_meta::TransactionWithMeta, @@ -122,12 +123,14 @@ use { solana_signature::Signature, solana_slot_hashes::SlotHashes, solana_slot_history::{Check, SlotHistory}, - solana_stake_interface::state::Delegation, + solana_stake_interface::{ + 
stake_history::StakeHistory, state::Delegation, sysvar::stake_history, + }, solana_svm::{ account_loader::LoadedTransaction, account_overrides::AccountOverrides, program_loader::load_program_with_pubkey, - transaction_balances::BalanceCollector, + transaction_balances::{BalanceCollector, SvmTokenInfo}, transaction_commit_result::{CommittedTransaction, TransactionCommitResult}, transaction_error_metrics::TransactionErrorMetrics, transaction_execution_result::{ @@ -143,24 +146,24 @@ use { }, }, solana_svm_callback::{AccountState, InvokeContextCallback, TransactionProcessingCallback}, + solana_svm_timings::{ExecuteTimingType, ExecuteTimings}, solana_svm_transaction::svm_message::SVMMessage, solana_system_transaction as system_transaction, - solana_sysvar::{self as sysvar, last_restart_slot::LastRestartSlot, Sysvar}, + solana_sysvar::{self as sysvar, last_restart_slot::LastRestartSlot, SysvarSerialize}, solana_sysvar_id::SysvarId, solana_time_utils::years_as_slots, - solana_timings::{ExecuteTimingType, ExecuteTimings}, solana_transaction::{ sanitized::{MessageHash, SanitizedTransaction, MAX_TX_ACCOUNT_LOCKS}, versioned::VersionedTransaction, Transaction, TransactionVerificationMode, }, - solana_transaction_context::{TransactionAccount, TransactionReturnData}, + solana_transaction_context::{transaction_accounts::TransactionAccount, TransactionReturnData}, solana_transaction_error::{TransactionError, TransactionResult as Result}, solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap}, std::{ collections::{HashMap, HashSet}, fmt, - ops::{AddAssign, RangeFull}, + ops::AddAssign, path::PathBuf, slice, sync::{ @@ -170,7 +173,6 @@ use { }, Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak, }, - thread::Builder, time::{Duration, Instant}, }, }; @@ -190,13 +192,11 @@ pub use {partitioned_epoch_rewards::KeyedRewardsAndNumPartitions, solana_reward_ /// params to `verify_accounts_hash` struct VerifyAccountsHashConfig { require_rooted_bank: bool, - 
run_in_background: bool, } mod accounts_lt_hash; mod address_lookup_table; pub mod bank_hash_details; -mod builtin_programs; pub mod builtins; mod check_transactions; mod fee_distribution; @@ -214,7 +214,7 @@ pub const MAX_LEADER_SCHEDULE_STAKES: Epoch = 5; pub type BankStatusCache = StatusCache>; #[cfg_attr( feature = "frozen-abi", - frozen_abi(digest = "5dfDCRGWPV7thfoZtLpTJAV8cC93vQUXgTm6BnrfeUsN") + frozen_abi(digest = "FUttxQbsCnX5VMRuj8c2sUxZKNARUTaomdgsbg8wM3D6") )] pub type BankSlotDelta = SlotDelta>; @@ -223,8 +223,6 @@ pub struct SquashTiming { pub squash_accounts_ms: u64, pub squash_accounts_cache_ms: u64, pub squash_accounts_index_ms: u64, - pub squash_accounts_store_ms: u64, - pub squash_cache_ms: u64, } @@ -233,7 +231,6 @@ impl AddAssign for SquashTiming { self.squash_accounts_ms += rhs.squash_accounts_ms; self.squash_accounts_cache_ms += rhs.squash_accounts_cache_ms; self.squash_accounts_index_ms += rhs.squash_accounts_index_ms; - self.squash_accounts_store_ms += rhs.squash_accounts_store_ms; self.squash_cache_ms += rhs.squash_cache_ms; } } @@ -314,6 +311,11 @@ pub struct TransactionSimulationResult { pub loaded_accounts_data_size: u32, pub return_data: Option, pub inner_instructions: Option>, + pub fee: Option, + pub pre_balances: Option>, + pub post_balances: Option>, + pub pre_token_balances: Option>, + pub post_token_balances: Option>, } #[derive(Clone, Debug)] @@ -527,7 +529,6 @@ impl PartialEq for Bank { // TODO: Confirm if all these fields are intentionally ignored! 
rewards: _, cluster_type: _, - rewards_pool_pubkeys: _, transaction_debug_keys: _, transaction_log_collector_config: _, transaction_log_collector: _, @@ -817,9 +818,6 @@ pub struct Bank { pub cluster_type: Option, - // this is temporary field only to remove rewards_pool entirely - pub rewards_pool_pubkeys: Arc>, - transaction_debug_keys: Option>>, // Global configuration for how transaction logs should be collected across all banks @@ -911,7 +909,7 @@ struct VoteReward { vote_rewards: u64, } -type VoteRewards = DashMap; +type VoteRewards = HashMap; #[derive(Debug, Default)] pub struct NewBankOptions { @@ -1070,7 +1068,6 @@ impl Bank { is_delta: AtomicBool::default(), rewards: RwLock::>::default(), cluster_type: Option::::default(), - rewards_pool_pubkeys: Arc::>::default(), transaction_debug_keys: Option::>>::default(), transaction_log_collector_config: Arc::>::default( ), @@ -1110,14 +1107,12 @@ impl Bank { } #[allow(clippy::too_many_arguments)] - pub fn new_with_paths( + pub fn new_from_genesis( genesis_config: &GenesisConfig, runtime_config: Arc, paths: Vec, debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, - debug_do_not_add_builtins: bool, - accounts_db_config: Option, + accounts_db_config: AccountsDbConfig, accounts_update_notifier: Option, #[allow(unused)] collector_id_for_tests: Option, exit: Arc, @@ -1130,6 +1125,10 @@ impl Bank { let mut bank = Self::default_with_accounts(accounts); bank.ancestors = Ancestors::from(vec![bank.slot()]); bank.compute_budget = runtime_config.compute_budget; + if let Some(compute_budget) = &bank.compute_budget { + bank.transaction_processor + .set_execution_cost(compute_budget.to_cost()); + } bank.transaction_account_lock_limit = runtime_config.transaction_account_lock_limit; bank.transaction_debug_keys = debug_keys; bank.cluster_type = Some(genesis_config.cluster_type); @@ -1144,11 +1143,7 @@ impl Bank { #[cfg(feature = "dev-context-only-utils")] bank.process_genesis_config(genesis_config, 
collector_id_for_tests, genesis_hash); - bank.finish_init( - genesis_config, - additional_builtins, - debug_do_not_add_builtins, - ); + bank.compute_and_apply_genesis_features(); // genesis needs stakes for all epochs up to the epoch implied by // slot = 0 and genesis configuration @@ -1254,9 +1249,6 @@ impl Bank { TransactionBatchProcessor::new_from(&parent.transaction_processor, slot, epoch) ); - let (rewards_pool_pubkeys, rewards_pool_pubkeys_time_us) = - measure_us!(parent.rewards_pool_pubkeys.clone()); - let (transaction_debug_keys, transaction_debug_keys_time_us) = measure_us!(parent.transaction_debug_keys.clone()); @@ -1317,7 +1309,6 @@ impl Bank { hard_forks: parent.hard_forks.clone(), rewards: RwLock::new(vec![]), cluster_type: parent.cluster_type, - rewards_pool_pubkeys, transaction_debug_keys, transaction_log_collector_config, transaction_log_collector: Arc::new(RwLock::new(TransactionLogCollector::default())), @@ -1430,7 +1421,6 @@ impl Bank { stakes_cache_time_us, epoch_stakes_time_us, builtin_program_ids_time_us, - rewards_pool_pubkeys_time_us, executor_cache_time_us: 0, transaction_debug_keys_time_us, transaction_log_collector_config_time_us, @@ -1447,7 +1437,7 @@ impl Bank { report_loaded_programs_stats( &parent .transaction_processor - .program_cache + .global_program_cache .read() .unwrap() .stats, @@ -1455,7 +1445,7 @@ impl Bank { ); new.transaction_processor - .program_cache + .global_program_cache .write() .unwrap() .stats @@ -1466,7 +1456,7 @@ impl Bank { pub fn set_fork_graph_in_program_cache(&self, fork_graph: Weak>) { self.transaction_processor - .program_cache + .global_program_cache .write() .unwrap() .set_fork_graph(fork_graph); @@ -1490,7 +1480,11 @@ impl Bank { .checked_div(2) .unwrap(); - let mut program_cache = self.transaction_processor.program_cache.write().unwrap(); + let mut program_cache = self + .transaction_processor + .global_program_cache + .write() + .unwrap(); if program_cache.upcoming_environments.is_some() { if let 
Some((key, program_to_recompile)) = program_cache.programs_to_recompile.pop() { @@ -1498,7 +1492,7 @@ impl Bank { drop(program_cache); let environments_for_epoch = self .transaction_processor - .program_cache + .global_program_cache .read() .unwrap() .get_environments_for_epoch(effective_epoch); @@ -1516,14 +1510,11 @@ impl Bank { .load(Ordering::Relaxed), Ordering::Relaxed, ); - recompiled.ix_usage_counter.fetch_add( - program_to_recompile - .ix_usage_counter - .load(Ordering::Relaxed), - Ordering::Relaxed, - ); - let mut program_cache = - self.transaction_processor.program_cache.write().unwrap(); + let mut program_cache = self + .transaction_processor + .global_program_cache + .write() + .unwrap(); program_cache.assign_program(key, recompiled); } } @@ -1533,7 +1524,11 @@ impl Bank { // Anticipate the upcoming program runtime environment for the next epoch, // so we can try to recompile loaded programs before the feature transition hits. drop(program_cache); - let mut program_cache = self.transaction_processor.program_cache.write().unwrap(); + let mut program_cache = self + .transaction_processor + .global_program_cache + .write() + .unwrap(); let program_runtime_environment_v1 = create_program_runtime_environment_v1( &upcoming_feature_set.runtime_features(), &compute_budget, @@ -1567,7 +1562,7 @@ impl Bank { pub fn prune_program_cache(&self, new_root_slot: Slot, new_root_epoch: Epoch) { self.transaction_processor - .program_cache + .global_program_cache .write() .unwrap() .prune(new_root_slot, new_root_epoch); @@ -1575,7 +1570,7 @@ impl Bank { pub fn prune_program_cache_by_deployment_slot(&self, deployment_slot: Slot) { self.transaction_processor - .program_cache + .global_program_cache .write() .unwrap() .prune_by_deployment_slot(deployment_slot); @@ -1602,9 +1597,9 @@ impl Bank { .build() .expect("new rayon threadpool")); - let (_, apply_feature_activations_time_us) = measure_us!(thread_pool.install(|| { - 
self.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false) - })); + let (_, apply_feature_activations_time_us) = measure_us!( + thread_pool.install(|| { self.compute_and_apply_new_feature_activations() }) + ); // Add new entry to stakes.stake_history, set appropriate epoch and // update vote accounts with warmed up stakes before saving a @@ -1684,7 +1679,6 @@ impl Bank { parent.freeze(); let parent_timestamp = parent.clock().unix_timestamp; let mut new = Bank::new_from_parent(parent, collector_id, slot); - new.apply_feature_activations(ApplyFeatureActivationsCaller::WarpFromParent, false); new.update_epoch_stakes(new.epoch_schedule().get_epoch(slot)); new.tick_height.store(new.max_tick_height(), Relaxed); @@ -1704,14 +1698,12 @@ impl Bank { } /// Create a bank from explicit arguments and deserialized fields from snapshot - pub(crate) fn new_from_fields( + pub(crate) fn new_from_snapshot( bank_rc: BankRc, genesis_config: &GenesisConfig, runtime_config: Arc, fields: BankFieldsToDeserialize, debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, - debug_do_not_add_builtins: bool, accounts_data_size_initial: u64, ) -> Self { let now = Instant::now(); @@ -1733,8 +1725,8 @@ impl Bank { Some(account) }) .expect( - "Stakes cache is inconsistent with accounts-db. This can indicate \ - a corrupted snapshot or bugs in cached accounts or accounts-db.", + "Stakes cache is inconsistent with accounts-db. 
This can indicate a corrupted \ + snapshot or bugs in cached accounts or accounts-db.", )); info!("Loading Stakes took: {stakes_time}"); let stakes_accounts_load_duration = now.elapsed(); @@ -1777,7 +1769,6 @@ impl Bank { is_delta: AtomicBool::new(fields.is_delta), rewards: RwLock::new(vec![]), cluster_type: Some(genesis_config.cluster_type), - rewards_pool_pubkeys: Arc::>::default(), transaction_debug_keys: debug_keys, transaction_log_collector_config: Arc::>::default( ), @@ -1809,38 +1800,14 @@ impl Bank { epoch_rewards_calculation_cache: Arc::new(Mutex::new(HashMap::default())), }; - bank.transaction_processor = - TransactionBatchProcessor::new_uninitialized(bank.slot, bank.epoch); - - // TODO: Only create the thread pool if we need to recalculate rewards, - // i.e. epoch_reward_status is active. Currently, this thread pool is - // always created and used for recalculate_partitioned_rewards and - // lt_hash calculation. Once lt_hash feature is active, lt_hash won't - // need the thread pool. Thereby, after lt_hash feature activation, we - // can change to create the thread pool only when we need to recalculate - // rewards. 
- let thread_pool = ThreadPoolBuilder::new() - .thread_name(|i| format!("solBnkNewFlds{i:02}")) - .build() - .expect("new rayon threadpool"); - bank.recalculate_partitioned_rewards(null_tracer(), &thread_pool); - - bank.finish_init( - genesis_config, - additional_builtins, - debug_do_not_add_builtins, - ); - bank.transaction_processor - .fill_missing_sysvar_cache_entries(&bank); - // Sanity assertions between bank snapshot and genesis config // Consider removing from serializable bank state // (BankFieldsToSerialize/BankFieldsToDeserialize) and initializing - // from the passed in genesis_config instead (as new()/new_with_paths() already do) + // from the passed in genesis_config instead (as new()/new_from_genesis() already do) assert_eq!( bank.genesis_creation_time, genesis_config.creation_time, - "Bank snapshot genesis creation time does not match genesis.bin creation time. \ - The snapshot and genesis.bin might pertain to different clusters" + "Bank snapshot genesis creation time does not match genesis.bin creation time. 
The \ + snapshot and genesis.bin might pertain to different clusters" ); assert_eq!(bank.ticks_per_slot, genesis_config.ticks_per_slot); assert_eq!( @@ -1860,6 +1827,13 @@ impl Bank { assert_eq!(bank.epoch_schedule, genesis_config.epoch_schedule); assert_eq!(bank.epoch, bank.epoch_schedule.get_epoch(bank.slot)); + bank.initialize_after_snapshot_restore(|| { + ThreadPoolBuilder::new() + .thread_name(|i| format!("solBnkClcRwds{i:02}")) + .build() + .expect("new rayon threadpool") + }); + datapoint_info!( "bank-new-from-fields", ( @@ -2123,7 +2097,7 @@ impl Bank { pub fn set_sysvar_for_tests(&self, sysvar: &T) where - T: Sysvar + SysvarId, + T: SysvarSerialize + SysvarId, { self.update_sysvar_account(&T::id(), |account| { create_account( @@ -2232,8 +2206,8 @@ impl Bank { return; } // if I'm the first Bank in an epoch, ensure stake_history is updated - self.update_sysvar_account(&sysvar::stake_history::id(), |account| { - create_account::( + self.update_sysvar_account(&stake_history::id(), |account| { + create_account::( self.stakes_cache.stakes().history(), self.inherit_specially_retained_account_fields(account), ) @@ -2370,7 +2344,7 @@ impl Bank { }, )| { if let Err(err) = vote_account.checked_add_lamports(vote_rewards) { - debug!("reward redemption failed for {}: {:?}", vote_pubkey, err); + debug!("reward redemption failed for {vote_pubkey}: {err:?}"); return; } @@ -2390,24 +2364,15 @@ impl Bank { result } - fn update_reward_history( - &self, - stake_rewards: StakeRewards, - vote_rewards: &VoteRewardsAccounts, - ) { - let additional_reserve = stake_rewards.len() + vote_rewards.accounts_with_rewards.len(); + fn update_vote_rewards(&self, vote_rewards: &VoteRewardsAccounts) { let mut rewards = self.rewards.write().unwrap(); - rewards.reserve(additional_reserve); + rewards.reserve(vote_rewards.accounts_with_rewards.len()); vote_rewards .accounts_with_rewards .iter() .for_each(|(vote_pubkey, vote_reward, _)| { rewards.push((*vote_pubkey, *vote_reward)); }); - 
stake_rewards - .into_iter() - .filter(|x| x.get_stake_reward() > 0) - .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); } fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { @@ -2535,7 +2500,6 @@ impl Bank { let mut total_index_us = 0; let mut total_cache_us = 0; - let mut total_store_us = 0; let mut squash_accounts_time = Measure::start("squash_accounts_time"); for slot in roots.iter().rev() { @@ -2543,7 +2507,6 @@ impl Bank { let add_root_timing = self.rc.accounts.add_root(*slot); total_index_us += add_root_timing.index_us; total_cache_us += add_root_timing.cache_us; - total_store_us += add_root_timing.store_us; } squash_accounts_time.stop(); @@ -2559,8 +2522,6 @@ impl Bank { squash_accounts_ms: squash_accounts_time.as_ms(), squash_accounts_index_ms: total_index_us / 1000, squash_accounts_cache_ms: total_cache_us / 1000, - squash_accounts_store_ms: total_store_us / 1000, - squash_cache_ms: squash_cache_time.as_ms(), } } @@ -2643,11 +2604,6 @@ impl Bank { self.slots_per_year, genesis_config.rent.clone(), ); - - // Add additional builtin programs specified in the genesis config - for (name, program_id) in &genesis_config.native_instruction_processors { - self.add_builtin_account(name, program_id); - } } fn burn_and_purge_account(&self, program_id: &Pubkey, mut account: AccountSharedData) { @@ -2679,8 +2635,9 @@ impl Bank { assert!( !self.freeze_started(), - "Can't change frozen bank by adding not-existing new precompiled program ({program_id}). \ - Maybe, inconsistent program activation is detected on snapshot restore?" + "Can't change frozen bank by adding not-existing new precompiled program \ + ({program_id}). Maybe, inconsistent program activation is detected on snapshot \ + restore?" ); // Add a bogus executable account, which will be loaded and ignored. 
@@ -2751,16 +2708,6 @@ impl Bank { Some(self.get_fee_for_message_with_lamports_per_signature(message, lamports_per_signature)) } - /// Returns true when startup accounts hash verification has completed or never had to run in background. - pub fn get_startup_verification_complete(&self) -> &Arc { - &self - .rc - .accounts - .accounts_db - .verify_accounts_hash_in_bg - .verified - } - pub fn get_fee_for_message_with_lamports_per_signature( &self, message: &impl SVMMessage, @@ -3103,6 +3050,7 @@ impl Bank { let LoadAndExecuteTransactionsOutput { mut processing_results, + balance_collector, .. } = self.load_and_execute_transactions( &batch, @@ -3121,12 +3069,12 @@ impl Bank { enable_cpi_recording, enable_log_recording: true, enable_return_data_recording: true, - enable_transaction_balance_recording: false, + enable_transaction_balance_recording: true, }, }, ); - debug!("simulate_transaction: {:?}", timings); + debug!("simulate_transaction: {timings:?}"); let processing_result = processing_results .pop() @@ -3134,45 +3082,69 @@ impl Bank { let ( post_simulation_accounts, result, + fee, logs, return_data, inner_instructions, units_consumed, loaded_accounts_data_size, ) = match processing_result { - Ok(processed_tx) => match processed_tx { - ProcessedTransaction::Executed(executed_tx) => { - let details = executed_tx.execution_details; - let post_simulation_accounts = executed_tx - .loaded_transaction - .accounts - .into_iter() - .take(number_of_accounts) - .collect::>(); - ( - post_simulation_accounts, - details.status, - details.log_messages, - details.return_data, - details.inner_instructions, - details.executed_units, - executed_tx.loaded_transaction.loaded_accounts_data_size, - ) + Ok(processed_tx) => { + let executed_units = processed_tx.executed_units(); + let loaded_accounts_data_size = processed_tx.loaded_accounts_data_size(); + + match processed_tx { + ProcessedTransaction::Executed(executed_tx) => { + let details = executed_tx.execution_details; + let 
post_simulation_accounts = executed_tx + .loaded_transaction + .accounts + .into_iter() + .take(number_of_accounts) + .collect::>(); + ( + post_simulation_accounts, + details.status, + Some(executed_tx.loaded_transaction.fee_details.total_fee()), + details.log_messages, + details.return_data, + details.inner_instructions, + executed_units, + loaded_accounts_data_size, + ) + } + ProcessedTransaction::FeesOnly(fees_only_tx) => ( + vec![], + Err(fees_only_tx.load_error), + Some(fees_only_tx.fee_details.total_fee()), + None, + None, + None, + executed_units, + loaded_accounts_data_size, + ), } - ProcessedTransaction::FeesOnly(fees_only_tx) => ( - vec![], - Err(fees_only_tx.load_error), - None, - None, - None, - 0, - fees_only_tx.rollback_accounts.data_size() as u32, - ), - }, - Err(error) => (vec![], Err(error), None, None, None, 0, 0), + } + Err(error) => (vec![], Err(error), None, None, None, None, 0, 0), }; let logs = logs.unwrap_or_default(); + let (pre_balances, post_balances, pre_token_balances, post_token_balances) = + match balance_collector { + Some(balance_collector) => { + let (mut native_pre, mut native_post, mut token_pre, mut token_post) = + balance_collector.into_vecs(); + + ( + native_pre.pop(), + native_post.pop(), + token_pre.pop(), + token_post.pop(), + ) + } + None => (None, None, None, None), + }; + TransactionSimulationResult { result, logs, @@ -3181,6 +3153,11 @@ impl Bank { loaded_accounts_data_size, return_data, inner_instructions, + fee, + pre_balances, + post_balances, + pre_token_balances, + post_token_balances, } } @@ -3327,7 +3304,7 @@ impl Bank { } Err(err) => { if err_count.0 == 0 { - debug!("tx error: {:?} {:?}", err, tx); + debug!("tx error: {err:?} {tx:?}"); } *err_count += 1; } @@ -3536,7 +3513,8 @@ impl Bank { ) -> Vec { assert!( !self.freeze_started(), - "commit_transactions() working on a bank that is already frozen or is undergoing freezing!" 
+ "commit_transactions() working on a bank that is already frozen or is undergoing \ + freezing!" ); let ProcessedTransactionCounts { @@ -3587,9 +3565,11 @@ impl Bank { let to_store = (self.slot(), accounts_to_store.as_slice()); self.update_bank_hash_stats(&to_store); + // See https://github.com/solana-labs/solana/pull/31455 for discussion + // on *not* updating the index within a threadpool. self.rc .accounts - .store_cached(to_store, transactions.as_deref()); + .store_accounts_seq(to_store, transactions.as_deref()); }); // Cached vote and stake accounts are synchronized with accounts-db @@ -3607,7 +3587,10 @@ impl Bank { if executed_tx.was_successful() && !programs_modified_by_tx.is_empty() { cache .get_or_insert_with(|| { - self.transaction_processor.program_cache.write().unwrap() + self.transaction_processor + .global_program_cache + .write() + .unwrap() }) .merge(programs_modified_by_tx); } @@ -3659,13 +3642,22 @@ impl Bank { match processing_result { ProcessedTransaction::Executed(executed_tx) => { + let successful = executed_tx.was_successful(); let execution_details = executed_tx.execution_details; let LoadedTransaction { accounts: loaded_accounts, fee_details, + rollback_accounts, .. } = executed_tx.loaded_transaction; + // Rollback value is used for failure. 
+ let fee_payer_post_balance = if successful { + loaded_accounts[0].1.lamports() + } else { + rollback_accounts.fee_payer().1.lamports() + }; + Ok(CommittedTransaction { status: execution_details.status, log_messages: execution_details.log_messages, @@ -3677,6 +3669,7 @@ impl Bank { loaded_accounts_count: loaded_accounts.len(), loaded_accounts_data_size, }, + fee_payer_post_balance, }) } ProcessedTransaction::FeesOnly(fees_only_tx) => Ok(CommittedTransaction { @@ -3690,6 +3683,11 @@ impl Bank { loaded_accounts_count: fees_only_tx.rollback_accounts.count(), loaded_accounts_data_size, }, + fee_payer_post_balance: fees_only_tx + .rollback_accounts + .fee_payer() + .1 + .lamports(), }), } }) @@ -3945,7 +3943,7 @@ impl Bank { }) }); self.update_bank_hash_stats(&accounts); - self.rc.accounts.store_accounts_cached(accounts); + self.rc.accounts.store_accounts_par(accounts, None); m.stop(); self.rc .accounts @@ -3976,40 +3974,32 @@ impl Bank { pubkey: &Pubkey, new_account: &AccountSharedData, ) { - let old_account_data_size = - if let Some(old_account) = self.get_account_with_fixed_root_no_cache(pubkey) { - match new_account.lamports().cmp(&old_account.lamports()) { - std::cmp::Ordering::Greater => { - let increased = new_account.lamports() - old_account.lamports(); - trace!( - "store_account_and_update_capitalization: increased: {} {}", - pubkey, - increased - ); - self.capitalization.fetch_add(increased, Relaxed); - } - std::cmp::Ordering::Less => { - let decreased = old_account.lamports() - new_account.lamports(); - trace!( - "store_account_and_update_capitalization: decreased: {} {}", - pubkey, - decreased - ); - self.capitalization.fetch_sub(decreased, Relaxed); - } - std::cmp::Ordering::Equal => {} + let old_account_data_size = if let Some(old_account) = + self.get_account_with_fixed_root_no_cache(pubkey) + { + match new_account.lamports().cmp(&old_account.lamports()) { + std::cmp::Ordering::Greater => { + let diff = new_account.lamports() - old_account.lamports(); + 
trace!("store_account_and_update_capitalization: increased: {pubkey} {diff}"); + self.capitalization.fetch_add(diff, Relaxed); } - old_account.data().len() - } else { - trace!( - "store_account_and_update_capitalization: created: {} {}", - pubkey, - new_account.lamports() - ); - self.capitalization - .fetch_add(new_account.lamports(), Relaxed); - 0 - }; + std::cmp::Ordering::Less => { + let diff = old_account.lamports() - new_account.lamports(); + trace!("store_account_and_update_capitalization: decreased: {pubkey} {diff}"); + self.capitalization.fetch_sub(diff, Relaxed); + } + std::cmp::Ordering::Equal => {} + } + old_account.data().len() + } else { + trace!( + "store_account_and_update_capitalization: created: {pubkey} {}", + new_account.lamports() + ); + self.capitalization + .fetch_add(new_account.lamports(), Relaxed); + 0 + }; self.store_account(pubkey, new_account); self.calculate_and_update_accounts_data_size_delta_off_chain( @@ -4031,24 +4021,17 @@ impl Bank { cost_tracker.set_limits(account_cost_limit, block_cost_limit, vote_cost_limit); } - fn finish_init( - &mut self, - genesis_config: &GenesisConfig, - additional_builtins: Option<&[BuiltinPrototype]>, - debug_do_not_add_builtins: bool, - ) { - if let Some(compute_budget) = self.compute_budget { - self.transaction_processor - .set_execution_cost(compute_budget.to_cost()); - } - - self.rewards_pool_pubkeys = - Arc::new(genesis_config.rewards_pools.keys().cloned().collect()); + /// This is called from genesis and snapshot restore + fn apply_activated_features(&mut self) { + // Update active set of reserved account keys which are not allowed to be write locked + self.reserved_account_keys = { + let mut reserved_keys = ReservedAccountKeys::clone(&self.reserved_account_keys); + reserved_keys.update_active_set(&self.feature_set); + Arc::new(reserved_keys) + }; - self.apply_feature_activations( - ApplyFeatureActivationsCaller::FinishInit, - debug_do_not_add_builtins, - ); + // Update the transaction processor 
with all active built-in programs + self.add_active_builtin_programs(); // Cost-Tracker is not serialized in snapshot or any configs. // We must apply previously activated features related to limits here @@ -4072,38 +4055,7 @@ impl Bank { self.apply_simd_0306_cost_tracker_changes(); } - if !debug_do_not_add_builtins { - for builtin in BUILTINS - .iter() - .chain(additional_builtins.unwrap_or(&[]).iter()) - { - // The builtin should be added if it has no enable feature ID - // and it has not been migrated to Core BPF. - // - // If a program was previously migrated to Core BPF, accountsDB - // from snapshot should contain the BPF program accounts. - let builtin_is_bpf = |program_id: &Pubkey| { - self.get_account(program_id) - .map(|a| a.owner() == &bpf_loader_upgradeable::id()) - .unwrap_or(false) - }; - if builtin.enable_feature_id.is_none() && !builtin_is_bpf(&builtin.program_id) { - self.transaction_processor.add_builtin( - self, - builtin.program_id, - builtin.name, - ProgramCacheEntry::new_builtin(0, builtin.name.len(), builtin.entrypoint), - ); - } - } - for precompile in get_precompiles() { - if precompile.feature.is_none() { - self.add_precompile(&precompile.program_id); - } - } - } - - let simd_0296_active = self + let simd_0268_active = self .feature_set .is_active(&raise_cpi_nesting_limit_to_8::id()); @@ -4114,7 +4066,7 @@ impl Bank { &self.feature_set.runtime_features(), &self .compute_budget() - .unwrap_or(ComputeBudget::new_with_defaults(simd_0296_active)) + .unwrap_or(ComputeBudget::new_with_defaults(simd_0268_active)) .to_budget(), false, /* deployment */ false, /* debugging_features */ @@ -4124,13 +4076,17 @@ impl Bank { Some(Arc::new(create_program_runtime_environment_v2( &self .compute_budget() - .unwrap_or(ComputeBudget::new_with_defaults(simd_0296_active)) + .unwrap_or(ComputeBudget::new_with_defaults(simd_0268_active)) .to_budget(), false, /* debugging_features */ ))), ); } + pub fn set_tick_height(&self, tick_height: u64) { + 
self.tick_height.store(tick_height, Relaxed) + } + pub fn set_inflation(&self, inflation: Inflation) { *self.inflation.write().unwrap() = inflation; } @@ -4147,14 +4103,14 @@ impl Bank { let bank_frozen = *lock != Hash::default(); if new_hard_fork_slot < bank_slot { warn!( - "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is older \ - than the bank at slot {bank_slot} that attempted to register it." + "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is older than the \ + bank at slot {bank_slot} that attempted to register it." ); } else if (new_hard_fork_slot == bank_slot) && bank_frozen { warn!( - "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is the same \ - slot as the bank at slot {bank_slot} that attempted to register it, but that \ - bank is already frozen." + "Hard fork at slot {new_hard_fork_slot} ignored, the hard fork is the same slot \ + as the bank at slot {bank_slot} that attempted to register it, but that bank is \ + already frozen." ); } else { self.hard_forks @@ -4168,19 +4124,20 @@ impl Bank { &self, pubkey: &Pubkey, ) -> Option { - self.load_account_with(pubkey, |_| false) + self.load_account_with(pubkey, false) .map(|(acc, _slot)| acc) } fn load_account_with( &self, pubkey: &Pubkey, - callback: impl for<'local> Fn(&'local AccountSharedData) -> bool, + should_put_in_read_cache: bool, ) -> Option<(AccountSharedData, Slot)> { - self.rc - .accounts - .accounts_db - .load_account_with(&self.ancestors, pubkey, callback) + self.rc.accounts.accounts_db.load_account_with( + &self.ancestors, + pubkey, + should_put_in_read_cache, + ) } // Hi! leaky abstraction here.... 
@@ -4510,8 +4467,8 @@ impl Bank { ); info!( "bank frozen: {slot} hash: {hash} signature_count: {} last_blockhash: {} \ - capitalization: {}, accounts_lt_hash checksum: {accounts_lt_hash_checksum}, \ - stats: {bank_hash_stats:?}", + capitalization: {}, accounts_lt_hash checksum: {accounts_lt_hash_checksum}, stats: \ + {bank_hash_stats:?}", self.signature_count(), self.last_blockhash(), self.capitalization(), @@ -4528,51 +4485,45 @@ impl Bank { pub fn run_final_hash_calc(&self) { self.force_flush_accounts_cache(); // note that this slot may not be a root - _ = self.verify_accounts_hash( + _ = self.verify_accounts( VerifyAccountsHashConfig { require_rooted_bank: false, - run_in_background: false, }, None, ); } - /// Recalculate the accounts hash from the account stores. Used to verify a snapshot. - /// return true if all is good - /// Only called from startup or test code. + /// Verify the account state as part of startup, typically from a snapshot. + /// + /// This fn compares the calculated accounts lt hash against the stored value in the bank. + /// + /// Normal validator operation will calculate the accounts lt hash during index generation. + /// Tests/ledger-tool may not have the calculated value from index generation (or the bank + /// being verified is different from the snapshot/startup bank), and thus will be calculated in + /// this function, using the accounts index for input, running in the foreground. + /// + /// Returns true if all is good. + /// + /// Only intended to be called at startup, or from tests/ledger-tool. #[must_use] - fn verify_accounts_hash( + fn verify_accounts( &self, - mut config: VerifyAccountsHashConfig, - duplicates_lt_hash: Option>, + config: VerifyAccountsHashConfig, + calculated_accounts_lt_hash: Option<&AccountsLtHash>, ) -> bool { let accounts_db = &self.rc.accounts.accounts_db; - // Wait until initial hash calc is complete before starting a new hash calc. - // This should only occur when we halt at a slot in ledger-tool. 
- accounts_db - .verify_accounts_hash_in_bg - .join_background_thread(); let slot = self.slot(); - if duplicates_lt_hash.is_none() { - // Calculating the accounts lt hash from storages *requires* a duplicates_lt_hash. - // If it is None here, then we must use the index instead, which also means we - // cannot run in the background. - config.run_in_background = false; - } - if config.require_rooted_bank && !accounts_db.accounts_index.is_alive_root(slot) { if let Some(parent) = self.parent() { info!( "slot {slot} is not a root, so verify accounts hash on parent bank at slot {}", parent.slot(), ); - // The duplicates_lt_hash is only valid for the current slot, so we must fall - // back to verifying the accounts lt hash with the index (which also means we - // cannot run in the background). - config.run_in_background = false; - return parent.verify_accounts_hash(config, None); + // The calculated_accounts_lt_hash parameter is only valid for the current slot, so + // we must fall back to calculating the accounts lt hash with the index. + return parent.verify_accounts(config, None); } else { // this will result in mismatch errors // accounts hash calc doesn't include unrooted slots @@ -4589,105 +4540,25 @@ impl Bank { let expected = expected_accounts_lt_hash.0.checksum(); let calculated = calculated_accounts_lt_hash.0.checksum(); error!( - "Verifying accounts failed: accounts lattice hashes do not match, \ - expected: {expected}, calculated: {calculated}", + "Verifying accounts failed: accounts lattice hashes do not match, expected: \ + {expected}, calculated: {calculated}", ); } is_ok } - // The snapshot storages must be captured *before* starting the background verification. - // Otherwise, it is possible that a delayed call to `get_snapshot_storages()` will *not* - // get the correct storages required to calculate and verify the accounts hashes. 
- let snapshot_storages = accounts_db.get_storages(RangeFull); + info!("Verifying accounts..."); + let start = Instant::now(); let expected_accounts_lt_hash = self.accounts_lt_hash.lock().unwrap().clone(); - if config.run_in_background { - let accounts_db_ = Arc::clone(accounts_db); - accounts_db.verify_accounts_hash_in_bg.start(|| { - Builder::new() - .name("solBgHashVerify".into()) - .spawn(move || { - info!("Initial background accounts hash verification has started"); - let start = Instant::now(); - let thread_pool = { - let num_threads = accounts_db_ - .num_hash_threads - .unwrap_or_else(accounts_db::default_num_hash_threads) - .get(); - ThreadPoolBuilder::new() - .thread_name(|i| format!("solVerfyAccts{i:02}")) - .num_threads(num_threads) - .build() - .unwrap() - }; - let (calculated_accounts_lt_hash, lattice_verify_time) = - meas_dur!(thread_pool.install(|| { - accounts_db_.calculate_accounts_lt_hash_at_startup_from_storages( - snapshot_storages.0.as_slice(), - &duplicates_lt_hash.unwrap(), - slot, - ) - })); - let is_ok = - check_lt_hash(&expected_accounts_lt_hash, &calculated_accounts_lt_hash); - accounts_db_ - .verify_accounts_hash_in_bg - .background_finished(); - let total_time = start.elapsed(); - datapoint_info!( - "startup_verify_accounts", - ("total_us", total_time.as_micros(), i64), - ( - "verify_accounts_lt_hash_us", - lattice_verify_time.as_micros(), - i64 - ), - ); - info!( - "Initial background accounts hash verification has stopped \ - in {total_time:?}", - ); - is_ok - }) - .unwrap() - }); - true // initial result is true. We haven't failed yet. If verification fails, we'll panic from bg thread. 
+ let is_ok = if let Some(calculated_accounts_lt_hash) = calculated_accounts_lt_hash { + check_lt_hash(&expected_accounts_lt_hash, calculated_accounts_lt_hash) } else { - let calculated_accounts_lt_hash = if let Some(duplicates_lt_hash) = duplicates_lt_hash { - accounts_db.calculate_accounts_lt_hash_at_startup_from_storages( - snapshot_storages.0.as_slice(), - &duplicates_lt_hash, - slot, - ) - } else { - accounts_db.calculate_accounts_lt_hash_at_startup_from_index(&self.ancestors, slot) - }; - let is_ok = check_lt_hash(&expected_accounts_lt_hash, &calculated_accounts_lt_hash); - self.set_initial_accounts_hash_verification_completed(); - is_ok - } - } - - /// Specify that initial verification has completed. - /// Called internally when verification runs in the foreground thread. - /// Also has to be called by some tests which don't do verification on startup. - pub fn set_initial_accounts_hash_verification_completed(&self) { - self.rc - .accounts - .accounts_db - .verify_accounts_hash_in_bg - .verification_complete(); - } - - /// return true if bg hash verification is complete - /// return false if bg hash verification has not completed yet - /// if hash verification failed, a panic will occur - pub fn has_initial_accounts_hash_verification_completed(&self) -> bool { - self.rc - .accounts - .accounts_db - .verify_accounts_hash_in_bg - .check_complete() + let calculated_accounts_lt_hash = + accounts_db.calculate_accounts_lt_hash_at_startup_from_index(&self.ancestors, slot); + check_lt_hash(&expected_accounts_lt_hash, &calculated_accounts_lt_hash) + }; + info!("Verifying accounts... Done in {:?}", start.elapsed()); + is_ok } /// Get this bank's storages to use for snapshots. @@ -4784,11 +4655,6 @@ impl Bank { /// Only intended to be called at startup by ledger-tool or tests. 
/// (cannot be made DCOU due to solana-program-test) pub fn calculate_capitalization_for_tests(&self) -> u64 { - self.rc - .accounts - .accounts_db - .verify_accounts_hash_in_bg - .join_background_thread(); self.rc .accounts .accounts_db @@ -4824,31 +4690,19 @@ impl Bank { skip_shrink: bool, force_clean: bool, latest_full_snapshot_slot: Slot, - duplicates_lt_hash: Option>, + calculated_accounts_lt_hash: Option<&AccountsLtHash>, ) -> bool { - // If we verify the accounts using the lattice-based hash *and* with storages (as opposed - // to the index), then we rely on the DuplicatesLtHash as given by generate_index(). Since - // the duplicates are based on a specific set of storages, we must use the exact same - // storages to do the lattice-based accounts verification. This means we must wait to - // clean/shrink until *after* we've gotten Arcs to the storages (this prevents their - // untimely removal). Simply, we call `verify_accounts_hash()` before we call `clean` or - // `shrink`. let (verified_accounts, verify_accounts_time_us) = measure_us!({ let should_verify_accounts = !self.rc.accounts.accounts_db.skip_initial_hash_calc; if should_verify_accounts { - info!("Verifying accounts..."); - let verified = self.verify_accounts_hash( + self.verify_accounts( VerifyAccountsHashConfig { require_rooted_bank: false, - run_in_background: true, }, - duplicates_lt_hash, - ); - info!("Verifying accounts... In background."); - verified + calculated_accounts_lt_hash, + ) } else { info!("Verifying accounts... 
Skipped."); - self.set_initial_accounts_hash_verification_completed(); true } }); @@ -5180,8 +5034,7 @@ impl Bank { program_id: Pubkey, builtin_function: BuiltinFunctionWithContext, ) { - self.transaction_processor.add_builtin( - self, + self.add_builtin( program_id, "mockup", ProgramCacheEntry::new_builtin(self.slot, 0, builtin_function), @@ -5189,9 +5042,9 @@ impl Bank { } pub fn add_precompile(&mut self, program_id: &Pubkey) { - debug!("Adding precompiled program {}", program_id); + debug!("Adding precompiled program {program_id}"); self.add_precompiled_account(program_id); - debug!("Added precompiled program {:?}", program_id); + debug!("Added precompiled program {program_id:?}"); } // Call AccountsDb::clean_accounts() @@ -5285,29 +5138,63 @@ impl Bank { &self.reserved_account_keys.active } - // This is called from snapshot restore AND for each epoch boundary - // The entire code path herein must be idempotent - fn apply_feature_activations( - &mut self, - caller: ApplyFeatureActivationsCaller, - debug_do_not_add_builtins: bool, - ) { - use ApplyFeatureActivationsCaller as Caller; - let allow_new_activations = match caller { - Caller::FinishInit => false, - Caller::NewFromParent => true, - Caller::WarpFromParent => false, - }; + /// Compute and apply all activated features, initialize the transaction + /// processor, and recalculate partitioned rewards if needed + fn initialize_after_snapshot_restore(&mut self, rewards_thread_pool_builder: F) + where + F: FnOnce() -> TP, + TP: std::borrow::Borrow, + { + self.transaction_processor = + TransactionBatchProcessor::new_uninitialized(self.slot, self.epoch); + if let Some(compute_budget) = &self.compute_budget { + self.transaction_processor + .set_execution_cost(compute_budget.to_cost()); + } + + self.compute_and_apply_features_after_snapshot_restore(); + + self.recalculate_partitioned_rewards_if_active(rewards_thread_pool_builder); + + self.transaction_processor + .fill_missing_sysvar_cache_entries(self); + } + + /// 
Compute and apply all activated features and also add accounts for builtins + fn compute_and_apply_genesis_features(&mut self) { + // Update the feature set to include all features active at this slot + let feature_set = self.compute_active_feature_set(false).0; + self.feature_set = Arc::new(feature_set); + + // Add built-in program accounts to the bank if they don't already exist + self.add_builtin_program_accounts(); + + self.apply_activated_features(); + } + + /// Compute and apply all activated features but do not add built-in + /// accounts because we shouldn't modify accounts db for a completed bank + fn compute_and_apply_features_after_snapshot_restore(&mut self) { + // Update the feature set to include all features active at this slot + let feature_set = self.compute_active_feature_set(false).0; + self.feature_set = Arc::new(feature_set); + + self.apply_activated_features(); + } + + /// This is called from each epoch boundary + fn compute_and_apply_new_feature_activations(&mut self) { + let include_pending = true; let (feature_set, new_feature_activations) = - self.compute_active_feature_set(allow_new_activations); + self.compute_active_feature_set(include_pending); self.feature_set = Arc::new(feature_set); // Update activation slot of features in `new_feature_activations` for feature_id in new_feature_activations.iter() { if let Some(mut account) = self.get_account_with_fixed_root(feature_id) { - if let Some(mut feature) = feature::from_account(&account) { + if let Some(mut feature) = feature::state::from_account(&account) { feature.activated_at = Some(self.slot()); - if feature::to_account(&feature, &mut account).is_some() { + if feature::state::to_account(&feature, &mut account).is_some() { self.store_account(feature_id, &account); } info!("Feature {} activated at slot {}", feature_id, self.slot()); @@ -5335,12 +5222,7 @@ impl Bank { self.rent_collector.rent.burn_percent = 50; // 50% rent burn } - if !debug_do_not_add_builtins { - 
self.apply_builtin_program_feature_transitions( - allow_new_activations, - &new_feature_activations, - ); - } + self.apply_new_builtin_program_feature_transitions(&new_feature_activations); if new_feature_activations.contains(&feature_set::raise_block_limits_to_100m::id()) { let block_cost_limit = simd_0286_block_limits(); @@ -5363,6 +5245,70 @@ impl Bank { } } + fn apply_new_builtin_program_feature_transitions( + &mut self, + new_feature_activations: &AHashSet, + ) { + for builtin in BUILTINS.iter() { + if let Some(feature_id) = builtin.enable_feature_id { + if new_feature_activations.contains(&feature_id) { + self.add_builtin( + builtin.program_id, + builtin.name, + ProgramCacheEntry::new_builtin( + self.feature_set.activated_slot(&feature_id).unwrap_or(0), + builtin.name.len(), + builtin.entrypoint, + ), + ); + } + } + + if let Some(core_bpf_migration_config) = &builtin.core_bpf_migration_config { + // If the builtin is set to be migrated to Core BPF on feature + // activation, perform the migration which will remove it from + // the builtins list and the cache. + if new_feature_activations.contains(&core_bpf_migration_config.feature_id) { + if let Err(e) = self + .migrate_builtin_to_core_bpf(&builtin.program_id, core_bpf_migration_config) + { + warn!( + "Failed to migrate builtin {} to Core BPF: {}", + builtin.name, e + ); + } + } + }; + } + + // Migrate any necessary stateless builtins to core BPF. + // Stateless builtins do not have an `enable_feature_id` since they + // do not exist on-chain. 
+ for stateless_builtin in STATELESS_BUILTINS.iter() { + if let Some(core_bpf_migration_config) = &stateless_builtin.core_bpf_migration_config { + if new_feature_activations.contains(&core_bpf_migration_config.feature_id) { + if let Err(e) = self.migrate_builtin_to_core_bpf( + &stateless_builtin.program_id, + core_bpf_migration_config, + ) { + warn!( + "Failed to migrate stateless builtin {} to Core BPF: {}", + stateless_builtin.name, e + ); + } + } + } + } + + for precompile in get_precompiles() { + if let Some(feature_id) = &precompile.feature { + if new_feature_activations.contains(feature_id) { + self.add_precompile(&precompile.program_id); + } + } + } + } + fn adjust_sysvar_balance_for_rent(&self, account: &mut AccountSharedData) { account.set_lamports( self.get_minimum_balance_for_rent_exemption(account.data().len()) @@ -5381,7 +5327,7 @@ impl Bank { for feature_id in self.feature_set.inactive() { let mut activated = None; if let Some(account) = self.get_account_with_fixed_root(feature_id) { - if let Some(feature) = feature::from_account(&account) { + if let Some(feature) = feature::state::from_account(&account) { match feature.activated_at { None if include_pending => { // Feature activation is pending @@ -5406,11 +5352,21 @@ impl Bank { (FeatureSet::new(active, inactive), pending) } - fn apply_builtin_program_feature_transitions( - &mut self, - only_apply_transitions_for_new_features: bool, - new_feature_activations: &AHashSet, - ) { + /// If `feature_id` is pending to be activated at the next epoch boundary, return + /// the first slot at which it will be active (the epoch boundary). 
+ pub fn compute_pending_activation_slot(&self, feature_id: &Pubkey) -> Option { + let account = self.get_account_with_fixed_root(feature_id)?; + let feature = feature::from_account(&account)?; + if feature.activated_at.is_some() { + // Feature is already active + return None; + } + // Feature will be active at the next epoch boundary + let active_epoch = self.epoch + 1; + Some(self.epoch_schedule.get_first_slot_in_epoch(active_epoch)) + } + + fn add_active_builtin_programs(&mut self) { for builtin in BUILTINS.iter() { // The `builtin_is_bpf` flag is used to handle the case where a // builtin is scheduled to be enabled by one feature gate and @@ -5428,82 +5384,88 @@ impl Bank { // // Using the same feature gate for both enabling and migrating a // builtin to Core BPF should be strictly avoided. - let mut builtin_is_bpf = false; - if let Some(core_bpf_migration_config) = &builtin.core_bpf_migration_config { - // If the builtin is set to be migrated to Core BPF on feature - // activation, perform the migration and do not add the program - // to the bank's builtins. The migration will remove it from - // the builtins list and the cache. - if new_feature_activations.contains(&core_bpf_migration_config.feature_id) { - if let Err(e) = self - .migrate_builtin_to_core_bpf(&builtin.program_id, core_bpf_migration_config) - { - warn!( - "Failed to migrate builtin {} to Core BPF: {}", - builtin.name, e - ); - } else { - builtin_is_bpf = true; - } - } else { - // If the builtin has already been migrated to Core BPF, do not - // add it to the bank's builtins. 
- builtin_is_bpf = self - .get_account(&builtin.program_id) - .map(|a| a.owner() == &bpf_loader_upgradeable::id()) - .unwrap_or(false); - } + let builtin_is_bpf = builtin.core_bpf_migration_config.is_some() && { + self.get_account(&builtin.program_id) + .map(|a| a.owner() == &bpf_loader_upgradeable::id()) + .unwrap_or(false) }; - if let Some(feature_id) = builtin.enable_feature_id { - let should_enable_builtin_on_feature_transition = !builtin_is_bpf - && if only_apply_transitions_for_new_features { - new_feature_activations.contains(&feature_id) - } else { - self.feature_set.is_active(&feature_id) - }; + // If the builtin has already been migrated to Core BPF, do not + // add it to the bank's builtins. + if builtin_is_bpf { + continue; + } - if should_enable_builtin_on_feature_transition { - self.transaction_processor.add_builtin( - self, - builtin.program_id, - builtin.name, - ProgramCacheEntry::new_builtin( - self.feature_set.activated_slot(&feature_id).unwrap_or(0), - builtin.name.len(), - builtin.entrypoint, - ), - ); - } + let builtin_is_active = builtin + .enable_feature_id + .map(|feature_id| self.feature_set.is_active(&feature_id)) + .unwrap_or(true); + + if builtin_is_active { + let activation_slot = builtin + .enable_feature_id + .and_then(|feature_id| self.feature_set.activated_slot(&feature_id)) + .unwrap_or(0); + self.transaction_processor.add_builtin( + builtin.program_id, + ProgramCacheEntry::new_builtin( + activation_slot, + builtin.name.len(), + builtin.entrypoint, + ), + ); } } + } - // Migrate any necessary stateless builtins to core BPF. - // Stateless builtins do not have an `enable_feature_id` since they - // do not exist on-chain. 
- for stateless_builtin in STATELESS_BUILTINS.iter() { - if let Some(core_bpf_migration_config) = &stateless_builtin.core_bpf_migration_config { - if new_feature_activations.contains(&core_bpf_migration_config.feature_id) { - if let Err(e) = self.migrate_builtin_to_core_bpf( - &stateless_builtin.program_id, - core_bpf_migration_config, - ) { - warn!( - "Failed to migrate stateless builtin {} to Core BPF: {}", - stateless_builtin.name, e - ); - } - } + fn add_builtin_program_accounts(&mut self) { + for builtin in BUILTINS.iter() { + // The `builtin_is_bpf` flag is used to handle the case where a + // builtin is scheduled to be enabled by one feature gate and + // later migrated to Core BPF by another. + // + // There should never be a case where a builtin is set to be + // migrated to Core BPF and is also set to be enabled on feature + // activation on the same feature gate. However, the + // `builtin_is_bpf` flag will handle this case as well, electing + // to first attempt the migration to Core BPF. + // + // The migration to Core BPF will fail gracefully because the + // program account will not exist. The builtin will subsequently + // be enabled, but it will never be migrated to Core BPF. + // + // Using the same feature gate for both enabling and migrating a + // builtin to Core BPF should be strictly avoided. + let builtin_is_bpf = builtin.core_bpf_migration_config.is_some() && { + self.get_account(&builtin.program_id) + .map(|a| a.owner() == &bpf_loader_upgradeable::id()) + .unwrap_or(false) + }; + + // If the builtin has already been migrated to Core BPF, do not + // add it to the bank's builtins. 
+ if builtin_is_bpf { + continue; + } + + let builtin_is_active = builtin + .enable_feature_id + .map(|feature_id| self.feature_set.is_active(&feature_id)) + .unwrap_or(true); + + if builtin_is_active { + self.add_builtin_account(builtin.name, &builtin.program_id); } } for precompile in get_precompiles() { - let should_add_precompile = precompile + let precompile_is_active = precompile .feature .as_ref() .map(|feature_id| self.feature_set.is_active(feature_id)) - .unwrap_or(false); - if should_add_precompile { + .unwrap_or(true); + + if precompile_is_active { self.add_precompile(&precompile.program_id); } } @@ -5533,7 +5495,7 @@ impl Bank { // Unload a program from the bank's cache self.transaction_processor - .program_cache + .global_program_cache .write() .unwrap() .remove_programs([*old_address].into_iter()); @@ -5585,6 +5547,10 @@ impl Bank { &self.fee_structure } + pub fn parent_block_id(&self) -> Option { + self.parent().and_then(|p| p.block_id()) + } + pub fn block_id(&self) -> Option { *self.block_id.read().unwrap() } @@ -5598,8 +5564,54 @@ impl Bank { } pub fn add_builtin(&self, program_id: Pubkey, name: &str, builtin: ProgramCacheEntry) { - self.transaction_processor - .add_builtin(self, program_id, name, builtin) + debug!("Adding program {name} under {program_id:?}"); + self.add_builtin_account(name, &program_id); + self.transaction_processor.add_builtin(program_id, builtin); + debug!("Added program {name} under {program_id:?}"); + } + + // NOTE: must hold idempotent for the same set of arguments + /// Add a builtin program account + fn add_builtin_account(&self, name: &str, program_id: &Pubkey) { + let existing_genuine_program = + self.get_account_with_fixed_root(program_id) + .and_then(|account| { + // it's very unlikely to be squatted at program_id as non-system account because of burden to + // find victim's pubkey/hash. So, when account.owner is indeed native_loader's, it's + // safe to assume it's a genuine program. 
+ if native_loader::check_id(account.owner()) { + Some(account) + } else { + // malicious account is pre-occupying at program_id + self.burn_and_purge_account(program_id, account); + None + } + }); + + // introducing builtin program + if existing_genuine_program.is_some() { + // The existing account is sufficient + return; + } + + assert!( + !self.freeze_started(), + "Can't change frozen bank by adding not-existing new builtin program ({name}, \ + {program_id}). Maybe, inconsistent program activation is detected on snapshot \ + restore?" + ); + + // Add a bogus executable builtin account, which will be loaded and ignored. + let (lamports, rent_epoch) = + self.inherit_specially_retained_account_fields(&existing_genuine_program); + let account: AccountSharedData = AccountSharedData::from(Account { + lamports, + data: name.as_bytes().to_vec(), + owner: solana_sdk_ids::native_loader::id(), + executable: true, + rent_epoch, + }); + self.store_account_and_update_capitalization(program_id, &account); } pub fn get_bank_hash_stats(&self) -> BankHashStats { @@ -5656,63 +5668,11 @@ impl InvokeContextCallback for Bank { } impl TransactionProcessingCallback for Bank { - fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { - self.rc - .accounts - .accounts_db - .account_matches_owners(&self.ancestors, account, owners) - .ok() - } - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> { self.rc .accounts .accounts_db .load_with_fixed_root(&self.ancestors, pubkey) - .map(|(acc, _)| acc) - } - - // NOTE: must hold idempotent for the same set of arguments - /// Add a builtin program account - fn add_builtin_account(&self, name: &str, program_id: &Pubkey) { - let existing_genuine_program = - self.get_account_with_fixed_root(program_id) - .and_then(|account| { - // it's very unlikely to be squatted at program_id as non-system account because of burden to - 
// find victim's pubkey/hash. So, when account.owner is indeed native_loader's, it's - // safe to assume it's a genuine program. - if native_loader::check_id(account.owner()) { - Some(account) - } else { - // malicious account is pre-occupying at program_id - self.burn_and_purge_account(program_id, account); - None - } - }); - - // introducing builtin program - if existing_genuine_program.is_some() { - // The existing account is sufficient - return; - } - - assert!( - !self.freeze_started(), - "Can't change frozen bank by adding not-existing new builtin program ({name}, {program_id}). \ - Maybe, inconsistent program activation is detected on snapshot restore?" - ); - - // Add a bogus executable builtin account, which will be loaded and ignored. - let (lamports, rent_epoch) = - self.inherit_specially_retained_account_fields(&existing_genuine_program); - let account: AccountSharedData = AccountSharedData::from(Account { - lamports, - data: name.as_bytes().to_vec(), - owner: solana_sdk_ids::native_loader::id(), - executable: true, - rent_epoch, - }); - self.store_account_and_update_capitalization(program_id, &account); } fn inspect_account(&self, address: &Pubkey, account_state: AccountState, is_writable: bool) { @@ -5794,14 +5754,12 @@ impl Bank { test_config: BankTestConfig, paths: Vec, ) -> Self { - Self::new_with_paths( + Self::new_from_genesis( genesis_config, runtime_config, paths, None, - None, - false, - Some(test_config.accounts_db_config), + test_config.accounts_db_config, None, Some(Pubkey::new_unique()), Arc::default(), @@ -5817,14 +5775,12 @@ impl Bank { /// Intended for use by benches only. /// create new bank with the given config and paths. 
pub fn new_with_paths_for_benches(genesis_config: &GenesisConfig, paths: Vec) -> Self { - Self::new_with_paths( + Self::new_from_genesis( genesis_config, Arc::::default(), paths, None, - None, - false, - Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), + ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, None, Some(Pubkey::new_unique()), Arc::default(), @@ -5893,16 +5849,6 @@ impl Bank { .flush_accounts_cache_slot_for_tests(self.slot()) } - /// This is only valid to call from tests. - /// block until initial accounts hash verification has completed - pub fn wait_for_initial_accounts_hash_verification_completed_for_tests(&self) { - self.rc - .accounts - .accounts_db - .verify_accounts_hash_in_bg - .join_background_thread() - } - pub fn get_sysvar_cache_for_tests(&self) -> SysvarCache { self.transaction_processor.get_sysvar_cache_for_tests() } @@ -5918,7 +5864,11 @@ impl Bank { ProgramCacheForTxBatch::new_from_cache( slot, self.epoch_schedule.get_epoch(slot), - &self.transaction_processor.program_cache.read().unwrap(), + &self + .transaction_processor + .global_program_cache + .read() + .unwrap(), ) } @@ -6010,15 +5960,6 @@ fn calculate_data_size_delta(old_data_size: usize, new_data_size: usize) -> i64 new_data_size.saturating_sub(old_data_size) } -/// Since `apply_feature_activations()` has different behavior depending on its caller, enumerate -/// those callers explicitly. 
-#[derive(Debug, Copy, Clone, Eq, PartialEq)] -enum ApplyFeatureActivationsCaller { - FinishInit, - NewFromParent, - WarpFromParent, -} - impl Drop for Bank { fn drop(&mut self) { if let Some(drop_callback) = self.drop_callback.read().unwrap().0.as_ref() { @@ -6069,7 +6010,7 @@ pub mod test_utils { let mut vote_account = bank.get_account(vote_pubkey).unwrap_or_default(); let mut vote_state = vote_state::from(&vote_account).unwrap_or_default(); vote_state.last_timestamp = timestamp; - let versioned = VoteStateVersions::new_current(vote_state); + let versioned = VoteStateVersions::new_v3(vote_state); vote_state::to(&versioned, &mut vote_account).unwrap(); bank.store_account(vote_pubkey, &vote_account); } diff --git a/runtime/src/bank/accounts_lt_hash.rs b/runtime/src/bank/accounts_lt_hash.rs index 72221c6402cd27..8e09e7e28ffcc1 100644 --- a/runtime/src/bank/accounts_lt_hash.rs +++ b/runtime/src/bank/accounts_lt_hash.rs @@ -193,7 +193,7 @@ impl Bank { .rc .accounts .accounts_db - .thread_pool + .thread_pool_foreground .install(do_calculate_delta_lt_hash); let total_time = measure_total.end_as_duration(); @@ -395,7 +395,7 @@ mod tests { }, solana_account::{ReadableAccount as _, WritableAccount as _}, solana_accounts_db::{ - accounts_db::{AccountsDbConfig, DuplicatesLtHash, ACCOUNTS_DB_CONFIG_FOR_TESTING}, + accounts_db::{AccountsDbConfig, MarkObsoleteAccounts, ACCOUNTS_DB_CONFIG_FOR_TESTING}, accounts_index::{ AccountsIndexConfig, IndexLimitMb, ACCOUNTS_INDEX_CONFIG_FOR_TESTING, }, @@ -406,7 +406,7 @@ mod tests { solana_native_token::LAMPORTS_PER_SOL, solana_pubkey::{self as pubkey, Pubkey}, solana_signer::Signer as _, - std::{cmp, collections::HashMap, iter, ops::RangeFull, str::FromStr as _, sync::Arc}, + std::{cmp, iter, str::FromStr as _, sync::Arc}, tempfile::TempDir, test_case::{test_case, test_matrix}, }; @@ -523,13 +523,7 @@ mod tests { .unwrap(); // store account 5 into this new bank, unchanged - bank.rc.accounts.store_cached( - ( - bank.slot(), - 
[(&keypair5.pubkey(), &prev_account5.clone().unwrap())].as_slice(), - ), - None, - ); + bank.store_account(&keypair5.pubkey(), prev_account5.as_ref().unwrap()); // freeze the bank to trigger update_accounts_lt_hash() to run bank.freeze(); @@ -784,107 +778,15 @@ mod tests { assert_eq!(expected_accounts_lt_hash, calculated_accounts_lt_hash); } - #[test_case(Features::None; "no features")] - #[test_case(Features::All; "all features")] - fn test_calculate_accounts_lt_hash_at_startup_from_storages(features: Features) { - let (genesis_config, mint_keypair) = genesis_config_with(features); - let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - - let amount = cmp::max( - bank.get_minimum_balance_for_rent_exemption(0), - LAMPORTS_PER_SOL, - ); - - // Write to this pubkey multiple times, so there are guaranteed duplicates in the storages. - let duplicate_pubkey = pubkey::new_rand(); - - // create some banks with some modified accounts so that there are stored accounts - // (note: the number of banks and transfers are arbitrary) - for _ in 0..7 { - let slot = bank.slot() + 1; - bank = - new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), slot); - for _ in 0..9 { - bank.register_unique_recent_blockhash_for_test(); - // note: use a random pubkey here to ensure accounts - // are spread across all the index bins - // (and calculating the accounts lt hash from storages requires no duplicates) - bank.transfer(amount, &mint_keypair, &pubkey::new_rand()) - .unwrap(); - - bank.register_unique_recent_blockhash_for_test(); - bank.transfer(amount, &mint_keypair, &duplicate_pubkey) - .unwrap(); - } - - // flush the write cache each slot to ensure there are account duplicates in the storages - bank.squash(); - bank.force_flush_accounts_cache(); - } - let expected_accounts_lt_hash = bank.accounts_lt_hash.lock().unwrap().clone(); - - // go through the storages to find the duplicates - let (mut storages, _slots) = 
bank.rc.accounts.accounts_db.get_storages(RangeFull); - // sort the storages in slot-descending order - // this makes skipping the latest easier - storages.sort_unstable_by_key(|storage| cmp::Reverse(storage.slot())); - let storages = storages.into_boxed_slice(); - - // get all the lt hashes for each version of all accounts - let mut stored_accounts_map = HashMap::<_, Vec<_>>::new(); - for storage in &storages { - storage - .accounts - .scan_accounts(|_offset, account| { - let pubkey = account.pubkey(); - let account_lt_hash = AccountsDb::lt_hash_account(&account, pubkey); - stored_accounts_map - .entry(*pubkey) - .or_default() - .push(account_lt_hash) - }) - .expect("must scan accounts storage"); - } - - // calculate the duplicates lt hash by skipping the first version (latest) of each account, - // and then mixing together all the rest - let duplicates_lt_hash = stored_accounts_map - .values() - .map(|lt_hashes| { - // the first element in the vec is the latest; all the rest are duplicates - <_hashes[1..] 
- }) - .fold(LtHash::identity(), |mut accum, duplicate_lt_hashes| { - for duplicate_lt_hash in duplicate_lt_hashes { - accum.mix_in(&duplicate_lt_hash.0); - } - accum - }); - let duplicates_lt_hash = DuplicatesLtHash(duplicates_lt_hash); - - // ensure that calculating the accounts lt hash from storages is correct - let calculated_accounts_lt_hash_from_storages = bank - .rc - .accounts - .accounts_db - .calculate_accounts_lt_hash_at_startup_from_storages( - &storages, - &duplicates_lt_hash, - bank.slot(), - ); - assert_eq!( - expected_accounts_lt_hash, - calculated_accounts_lt_hash_from_storages - ); - } - #[test_matrix( [Features::None, Features::All], - [IndexLimitMb::Minimal, IndexLimitMb::InMemOnly] + [IndexLimitMb::Minimal, IndexLimitMb::InMemOnly], + [MarkObsoleteAccounts::Disabled, MarkObsoleteAccounts::Enabled] )] fn test_verify_accounts_lt_hash_at_startup( features: Features, accounts_index_limit: IndexLimitMb, + mark_obsolete_accounts: MarkObsoleteAccounts, ) { let (mut genesis_config, mint_keypair) = genesis_config_with(features); // This test requires zero fees so that we can easily transfer an account's entire balance. @@ -975,9 +877,10 @@ mod tests { }; let accounts_db_config = AccountsDbConfig { index: Some(accounts_index_config), + mark_obsolete_accounts, ..ACCOUNTS_DB_CONFIG_FOR_TESTING }; - let (roundtrip_bank, _) = snapshot_bank_utils::bank_from_snapshot_archives( + let roundtrip_bank = snapshot_bank_utils::bank_from_snapshot_archives( &[accounts_dir], &bank_snapshots_dir, &snapshot, @@ -986,23 +889,20 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(accounts_db_config), + accounts_db_config, None, Arc::default(), ) .unwrap(); - // Correctly calculating the accounts lt hash in Bank::new_from_fields() depends on the + // Correctly calculating the accounts lt hash in Bank::new_from_snapshot() depends on the // bank being frozen. This is so we don't call `update_accounts_lt_hash()` twice on the // same bank! 
assert!(roundtrip_bank.is_frozen()); - // Wait for the startup verification to complete. If we don't panic, then we're good! - roundtrip_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(roundtrip_bank, *bank); } @@ -1077,7 +977,7 @@ mod tests { ) .unwrap(); let (_accounts_tempdir, accounts_dir) = snapshot_utils::create_tmp_accounts_dir_for_tests(); - let (roundtrip_bank, _) = snapshot_bank_utils::bank_from_snapshot_archives( + let roundtrip_bank = snapshot_bank_utils::bank_from_snapshot_archives( &[accounts_dir], &bank_snapshots_dir, &snapshot, @@ -1086,100 +986,15 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - // Wait for the startup verification to complete. If we don't panic, then we're good! - roundtrip_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(roundtrip_bank, *bank); } - - /// Obsolete accounts add metadata to storage entries that can effect the accounts_lt_hash - /// calculation. This test ensures that the accounts_lt_hash is not effected by updates in - /// storages that are not being considered for the accounts_lt_hash calculation. 
- #[test] - fn test_accounts_lt_hash_with_obsolete_accounts() { - let key1 = Pubkey::new_unique(); - let key2 = Pubkey::new_unique(); - let key3 = Pubkey::new_unique(); - - // Create a few accounts - let (genesis_config, mint_keypair) = - solana_genesis_config::create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let (bank, _forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - bank.transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1) - .unwrap(); - bank.transfer(2 * LAMPORTS_PER_SOL, &mint_keypair, &key2) - .unwrap(); - bank.transfer(3 * LAMPORTS_PER_SOL, &mint_keypair, &key3) - .unwrap(); - bank.fill_bank_with_ticks_for_tests(); - - // Force flush the bank to create the account storage entry - bank.squash(); - bank.force_flush_accounts_cache(); - - let (storages, _slots) = bank.rc.accounts.accounts_db.get_storages(RangeFull); - - // Calculate the current accounts_lt_hash - let expected_accounts_lt_hash = bank.accounts_lt_hash.lock().unwrap().clone(); - // Find the account storage entry for slot 0 - assert_eq!(storages.len(), 1); - let account_storage_entry = storages.first().unwrap(); - assert_eq!(account_storage_entry.slot(), bank.slot()); - - // Find all the accounts in slot 0 - let accounts = bank - .accounts() - .accounts_db - .get_unique_accounts_from_storage(account_storage_entry); - - // Find the offset of pubkey `key1` in the accounts db slot0 and save the offset. - let offset = accounts - .stored_accounts - .iter() - .find(|account| key1 == *account.pubkey()) - .map(|account| account.index_info.offset()) - .expect("Pubkey1 is present in Slot0"); - - // Mark pubkey1 as obsolete in slot 1 - // This is a valid scenario that the accounts_lt_hash verification could see if slot1 - // transfers the balance of pubkey1 to a new pubkey. 
- account_storage_entry - .mark_accounts_obsolete(vec![(offset, 0)].into_iter(), bank.slot() + 1); - - // Recalculate the hash from storages, calculating the hash as of slot 0 like before - let calculated_accounts_lt_hash = bank - .accounts() - .accounts_db - .calculate_accounts_lt_hash_at_startup_from_storages( - storages.as_slice(), - &DuplicatesLtHash::default(), - bank.slot(), - ); - - // Ensure that the hash is the same as before since the obsolete account updates in slot0 - // marked at slot1 should be ignored - assert_eq!(calculated_accounts_lt_hash, expected_accounts_lt_hash); - - // Recalculate the hash from storages, but include obsolete account updates marked in slot1 - let recalculated_accounts_lt_hash = bank - .accounts() - .accounts_db - .calculate_accounts_lt_hash_at_startup_from_storages( - storages.as_slice(), - &DuplicatesLtHash::default(), - bank.slot() + 1, - ); - - // The hashes should be different now as pubkey1 account will not be included in the hash - assert_ne!(recalculated_accounts_lt_hash, expected_accounts_lt_hash); - } } diff --git a/runtime/src/bank/address_lookup_table.rs b/runtime/src/bank/address_lookup_table.rs index fc2613977999ec..a6cbb52c297ca2 100644 --- a/runtime/src/bank/address_lookup_table.rs +++ b/runtime/src/bank/address_lookup_table.rs @@ -4,9 +4,10 @@ use { solana_clock::Slot, solana_message::{ v0::{LoadedAddresses, MessageAddressTableLookup}, - AddressLoader, AddressLoaderError, + AddressLoader, }, solana_svm_transaction::message_address_table_lookup::SVMMessageAddressTableLookup, + solana_transaction_error::AddressLoaderError, }; fn into_address_loader_error(err: AddressLookupError) -> AddressLoaderError { diff --git a/runtime/src/bank/builtin_programs.rs b/runtime/src/bank/builtin_programs.rs deleted file mode 100644 index 102680c055d950..00000000000000 --- a/runtime/src/bank/builtin_programs.rs +++ /dev/null @@ -1,609 +0,0 @@ -#[cfg(test)] -mod tests { - use { - crate::bank::*, agave_feature_set::FeatureSet, - 
solana_genesis_config::create_genesis_config, solana_sdk_ids::ed25519_program, - }; - - #[test] - fn test_apply_builtin_program_feature_transitions_for_new_epoch() { - let (genesis_config, _mint_keypair) = create_genesis_config(100_000); - - let mut bank = Bank::new_for_tests(&genesis_config); - bank.feature_set = Arc::new(FeatureSet::all_enabled()); - bank.finish_init(&genesis_config, None, false); - - // Overwrite precompile accounts to simulate a cluster which already added precompiles. - for precompile in get_precompiles() { - bank.store_account(&precompile.program_id, &AccountSharedData::default()); - // Simulate cluster which added ed25519 precompile with a system program owner - if precompile.program_id == ed25519_program::id() { - bank.add_precompiled_account_with_owner( - &precompile.program_id, - solana_system_interface::program::id(), - ); - } else { - bank.add_precompiled_account(&precompile.program_id); - } - } - - // Normally feature transitions are applied to a bank that hasn't been - // frozen yet. Freeze the bank early to ensure that no account changes - // are made. - bank.freeze(); - - // Simulate crossing an epoch boundary for a new bank - let only_apply_transitions_for_new_features = true; - bank.apply_builtin_program_feature_transitions( - only_apply_transitions_for_new_features, - &AHashSet::new(), - ); - } - - #[test] - fn test_startup_from_snapshot_after_precompile_transition() { - let (genesis_config, _mint_keypair) = create_genesis_config(100_000); - - let mut bank = Bank::new_for_tests(&genesis_config); - bank.feature_set = Arc::new(FeatureSet::all_enabled()); - bank.finish_init(&genesis_config, None, false); - - // Overwrite precompile accounts to simulate a cluster which already added precompiles. 
- for precompile in get_precompiles() { - bank.store_account(&precompile.program_id, &AccountSharedData::default()); - bank.add_precompiled_account(&precompile.program_id); - } - - bank.freeze(); - - // Simulate starting up from snapshot finishing the initialization for a frozen bank - bank.finish_init(&genesis_config, None, false); - } -} - -#[cfg(test)] -mod tests_core_bpf_migration { - use { - crate::bank::{ - builtins::core_bpf_migration::tests::TestContext, - test_utils::goto_end_of_slot, - tests::{create_genesis_config, new_bank_from_parent_with_bank_forks}, - Bank, - }, - agave_feature_set::FeatureSet, - solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, - solana_builtins::{ - core_bpf_migration::CoreBpfMigrationConfig, - prototype::{BuiltinPrototype, StatelessBuiltinPrototype}, - BUILTINS, - }, - solana_epoch_schedule::EpochSchedule, - solana_feature_gate_interface::{self as feature, Feature}, - solana_instruction::{AccountMeta, Instruction}, - solana_loader_v3_interface::{get_program_data_address, state::UpgradeableLoaderState}, - solana_message::Message, - solana_native_token::LAMPORTS_PER_SOL, - solana_program_runtime::loaded_programs::ProgramCacheEntry, - solana_pubkey::Pubkey, - solana_sdk_ids::{bpf_loader_upgradeable, native_loader}, - solana_signer::Signer, - solana_transaction::Transaction, - std::{fs::File, io::Read, sync::Arc}, - test_case::test_case, - }; - - // CPI mockup to test CPI to newly migrated programs. 
- mod cpi_mockup { - use { - solana_instruction::Instruction, solana_program_runtime::declare_process_instruction, - }; - - declare_process_instruction!(Entrypoint, 0, |invoke_context| { - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - - let target_program_id = transaction_context.get_key_of_account_at_index( - instruction_context.get_index_of_instruction_account_in_transaction(0)?, - )?; - - let instruction = Instruction::new_with_bytes(*target_program_id, &[], Vec::new()); - - invoke_context.native_invoke(instruction, &[]) - }); - } - - fn test_elf() -> Vec { - let mut elf = Vec::new(); - File::open("../programs/bpf_loader/test_elfs/out/noop_aligned.so") - .unwrap() - .read_to_end(&mut elf) - .unwrap(); - elf - } - - enum TestPrototype<'a> { - Builtin(&'a BuiltinPrototype), - #[allow(unused)] - // We aren't migrating any stateless builtins right now. Uncomment if needed. - Stateless(&'a StatelessBuiltinPrototype), - } - impl<'a> TestPrototype<'a> { - fn deconstruct(&'a self) -> (&'a Pubkey, &'a CoreBpfMigrationConfig) { - match self { - Self::Builtin(prototype) => ( - &prototype.program_id, - prototype.core_bpf_migration_config.as_ref().unwrap(), - ), - Self::Stateless(prototype) => ( - &prototype.program_id, - prototype.core_bpf_migration_config.as_ref().unwrap(), - ), - } - } - } - - // This test can't be used to the `compute_budget` program, unless a valid - // `compute_budget` program is provided as the replacement (source). - // See program_runtime::compute_budget_processor::process_compute_budget_instructions`.` - // It also can't test the `bpf_loader_upgradeable` program, as it's used in - // the SVM's loader to invoke programs. - // See `solana_svm::account_loader::load_transaction_accounts`. 
- #[test_case(TestPrototype::Builtin(&BUILTINS[0]); "system")] - #[test_case(TestPrototype::Builtin(&BUILTINS[1]); "vote")] - #[test_case(TestPrototype::Builtin(&BUILTINS[2]); "stake")] - #[test_case(TestPrototype::Builtin(&BUILTINS[3]); "bpf_loader_deprecated")] - #[test_case(TestPrototype::Builtin(&BUILTINS[4]); "bpf_loader")] - fn test_core_bpf_migration(prototype: TestPrototype) { - let (mut genesis_config, mint_keypair) = - create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let slots_per_epoch = 32; - genesis_config.epoch_schedule = - EpochSchedule::custom(slots_per_epoch, slots_per_epoch, false); - - let mut root_bank = Bank::new_for_tests(&genesis_config); - - // Set up the CPI mockup to test CPI'ing to the migrated program. - let cpi_program_id = Pubkey::new_unique(); - let cpi_program_name = "mock_cpi_program"; - root_bank.transaction_processor.add_builtin( - &root_bank, - cpi_program_id, - cpi_program_name, - ProgramCacheEntry::new_builtin(0, cpi_program_name.len(), cpi_mockup::Entrypoint::vm), - ); - - let (builtin_id, config) = prototype.deconstruct(); - let feature_id = &config.feature_id; - let source_buffer_address = &config.source_buffer_address; - let upgrade_authority_address = config.upgrade_authority_address; - - // Add the feature to the bank's inactive feature set. - // Note this will add the feature ID if it doesn't exist. - let mut feature_set = FeatureSet::all_enabled(); - feature_set.deactivate(feature_id); - root_bank.feature_set = Arc::new(feature_set); - - // Initialize the source buffer account. - let test_context = TestContext::new( - &root_bank, - builtin_id, - source_buffer_address, - upgrade_authority_address, - ); - - let (bank, bank_forks) = root_bank.wrap_with_bank_forks_for_tests(); - - // Advance to the next epoch without activating the feature. 
- let mut first_slot_in_next_epoch = slots_per_epoch + 1; - let bank = new_bank_from_parent_with_bank_forks( - &bank_forks, - bank, - &Pubkey::default(), - first_slot_in_next_epoch, - ); - - // Assert the feature was not activated and the program was not - // migrated. - assert!(!bank.feature_set.is_active(feature_id)); - assert!(bank.get_account(source_buffer_address).is_some()); - - // Store the account to activate the feature. - bank.store_account_and_update_capitalization( - feature_id, - &feature::create_account(&Feature::default(), 42), - ); - - // Advance the bank to cross the epoch boundary and activate the - // feature. - goto_end_of_slot(bank.clone()); - first_slot_in_next_epoch += slots_per_epoch; - let migration_slot = first_slot_in_next_epoch; - let bank = new_bank_from_parent_with_bank_forks( - &bank_forks, - bank, - &Pubkey::default(), - first_slot_in_next_epoch, - ); - - // Run the post-migration program checks. - assert!(bank.feature_set.is_active(feature_id)); - test_context.run_program_checks(&bank, migration_slot); - - // Advance one slot so that the new BPF builtin program becomes - // effective in the program cache. - goto_end_of_slot(bank.clone()); - let next_slot = bank.slot() + 1; - let bank = - new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), next_slot); - - // Successfully invoke the new BPF builtin program. - bank.process_transaction(&Transaction::new( - &vec![&mint_keypair], - Message::new( - &[Instruction::new_with_bytes(*builtin_id, &[], Vec::new())], - Some(&mint_keypair.pubkey()), - ), - bank.last_blockhash(), - )) - .unwrap(); - - // Successfully invoke the new BPF builtin program via CPI. 
- bank.process_transaction(&Transaction::new( - &vec![&mint_keypair], - Message::new( - &[Instruction::new_with_bytes( - cpi_program_id, - &[], - vec![AccountMeta::new_readonly(*builtin_id, false)], - )], - Some(&mint_keypair.pubkey()), - ), - bank.last_blockhash(), - )) - .unwrap(); - - // Simulate crossing another epoch boundary for a new bank. - goto_end_of_slot(bank.clone()); - first_slot_in_next_epoch += slots_per_epoch; - let bank = new_bank_from_parent_with_bank_forks( - &bank_forks, - bank, - &Pubkey::default(), - first_slot_in_next_epoch, - ); - - // Run the post-migration program checks again. - assert!(bank.feature_set.is_active(feature_id)); - test_context.run_program_checks(&bank, migration_slot); - - // Again, successfully invoke the new BPF builtin program. - bank.process_transaction(&Transaction::new( - &vec![&mint_keypair], - Message::new( - &[Instruction::new_with_bytes(*builtin_id, &[], Vec::new())], - Some(&mint_keypair.pubkey()), - ), - bank.last_blockhash(), - )) - .unwrap(); - - // Again, successfully invoke the new BPF builtin program via CPI. - bank.process_transaction(&Transaction::new( - &vec![&mint_keypair], - Message::new( - &[Instruction::new_with_bytes( - cpi_program_id, - &[], - vec![AccountMeta::new_readonly(*builtin_id, false)], - )], - Some(&mint_keypair.pubkey()), - ), - bank.last_blockhash(), - )) - .unwrap(); - } - - // Simulate a failure to migrate the program. - // Here we want to see that the bank handles the failure gracefully and - // advances to the next epoch without issue. 
- #[test] - fn test_core_bpf_migration_failure() { - let (genesis_config, _mint_keypair) = create_genesis_config(0); - let mut root_bank = Bank::new_for_tests(&genesis_config); - - let test_prototype = TestPrototype::Builtin(&BUILTINS[0]); // System program - let (builtin_id, config) = test_prototype.deconstruct(); - let feature_id = &config.feature_id; - let source_buffer_address = &config.source_buffer_address; - let upgrade_authority_address = Some(Pubkey::new_unique()); - - // Add the feature to the bank's inactive feature set. - let mut feature_set = FeatureSet::all_enabled(); - feature_set.inactive_mut().insert(*feature_id); - root_bank.feature_set = Arc::new(feature_set); - - // Initialize the source buffer account. - let _test_context = TestContext::new( - &root_bank, - builtin_id, - source_buffer_address, - upgrade_authority_address, - ); - - let (bank, bank_forks) = root_bank.wrap_with_bank_forks_for_tests(); - - // Intentionally nuke the source buffer account to force the migration - // to fail. - bank.store_account_and_update_capitalization( - source_buffer_address, - &AccountSharedData::default(), - ); - - // Activate the feature. - bank.store_account_and_update_capitalization( - feature_id, - &feature::create_account(&Feature::default(), 42), - ); - - // Advance the bank to cross the epoch boundary and activate the - // feature. - goto_end_of_slot(bank.clone()); - let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 33); - - // Assert the feature _was_ activated but the program was not migrated. - assert!(bank.feature_set.is_active(feature_id)); - assert!(bank - .transaction_processor - .builtin_program_ids - .read() - .unwrap() - .contains(builtin_id)); - assert_eq!( - bank.get_account(builtin_id).unwrap().owner(), - &native_loader::id() - ); - - // Simulate crossing an epoch boundary again. 
- goto_end_of_slot(bank.clone()); - let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 96); - - // Again, assert the feature is still active and the program still was - // not migrated. - assert!(bank.feature_set.is_active(feature_id)); - assert!(bank - .transaction_processor - .builtin_program_ids - .read() - .unwrap() - .contains(builtin_id)); - assert_eq!( - bank.get_account(builtin_id).unwrap().owner(), - &native_loader::id() - ); - } - - // Simulate creating a bank from a snapshot after a migration feature was - // activated, but the migration failed. - // Here we want to see that the bank recognizes the failed migration and - // adds the original builtin to the new bank. - #[test] - fn test_core_bpf_migration_init_after_failed_migration() { - let (genesis_config, _mint_keypair) = create_genesis_config(0); - - let test_prototype = TestPrototype::Builtin(&BUILTINS[0]); // System program - let (builtin_id, config) = test_prototype.deconstruct(); - let feature_id = &config.feature_id; - - // Since the test feature IDs aren't included in the SDK, the only way - // to simulate loading from snapshot with this feature active is to - // create a bank, overwrite the feature set with the feature active, - // then re-run the `finish_init` method. - let mut bank = Bank::new_for_tests(&genesis_config); - - // Set up the feature set with the migration feature marked as active. - let mut feature_set = FeatureSet::all_enabled(); - feature_set.active_mut().insert(*feature_id, 0); - bank.feature_set = Arc::new(feature_set); - bank.store_account_and_update_capitalization( - feature_id, - &feature::create_account( - &Feature { - activated_at: Some(0), - }, - 42, - ), - ); - - // Run `finish_init` to simulate starting up from a snapshot. - // Clear all builtins to simulate a fresh bank init. 
- bank.transaction_processor - .program_cache - .write() - .unwrap() - .remove_programs( - bank.transaction_processor - .builtin_program_ids - .read() - .unwrap() - .clone() - .into_iter(), - ); - bank.transaction_processor - .builtin_program_ids - .write() - .unwrap() - .clear(); - bank.finish_init(&genesis_config, None, false); - - // Assert the feature is active and the bank still added the builtin. - assert!(bank.feature_set.is_active(feature_id)); - assert!(bank - .transaction_processor - .builtin_program_ids - .read() - .unwrap() - .contains(builtin_id)); - assert_eq!( - bank.get_account(builtin_id).unwrap().owner(), - &native_loader::id() - ); - - // Simulate crossing an epoch boundary for a new bank. - let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); - goto_end_of_slot(bank.clone()); - let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 33); - - // Assert the feature is active but the builtin was not migrated. - assert!(bank.feature_set.is_active(feature_id)); - assert!(bank - .transaction_processor - .builtin_program_ids - .read() - .unwrap() - .contains(builtin_id)); - assert_eq!( - bank.get_account(builtin_id).unwrap().owner(), - &native_loader::id() - ); - } - - // Simulate creating a bank from a snapshot after a migration feature was - // activated and the migration was successful. - // Here we want to see that the bank recognizes the migration and - // _does not_ add the original builtin to the new bank. 
- #[test] - fn test_core_bpf_migration_init_after_successful_migration() { - let (mut genesis_config, _mint_keypair) = create_genesis_config(0); - - let test_prototype = TestPrototype::Builtin(&BUILTINS[0]); // System program - let (builtin_id, config) = test_prototype.deconstruct(); - let feature_id = &config.feature_id; - - let upgrade_authority_address = Some(Pubkey::new_unique()); - let elf = test_elf(); - let program_data_metadata_size = UpgradeableLoaderState::size_of_programdata_metadata(); - let program_data_size = program_data_metadata_size + elf.len(); - - // Set up a post-migration builtin. - let builtin_program_data_address = get_program_data_address(builtin_id); - let builtin_program_account = AccountSharedData::new_data( - 100_000, - &UpgradeableLoaderState::Program { - programdata_address: builtin_program_data_address, - }, - &bpf_loader_upgradeable::id(), - ) - .unwrap(); - let mut builtin_program_data_account = AccountSharedData::new_data_with_space( - 100_000, - &UpgradeableLoaderState::ProgramData { - slot: 0, - upgrade_authority_address, - }, - program_data_size, - &bpf_loader_upgradeable::id(), - ) - .unwrap(); - builtin_program_data_account.data_as_mut_slice()[program_data_metadata_size..] - .copy_from_slice(&elf); - genesis_config - .accounts - .insert(*builtin_id, builtin_program_account.into()); - genesis_config.accounts.insert( - builtin_program_data_address, - builtin_program_data_account.into(), - ); - - // Use this closure to run checks on the builtin. - let check_builtin_is_bpf = |bank: &Bank| { - // The bank's transaction processor should not contain the builtin - // in its list of builtin program IDs. - assert!(!bank - .transaction_processor - .builtin_program_ids - .read() - .unwrap() - .contains(builtin_id)); - // The builtin should be owned by the upgradeable loader and have - // the correct state. 
- let fetched_builtin_program_account = bank.get_account(builtin_id).unwrap(); - assert_eq!( - fetched_builtin_program_account.owner(), - &bpf_loader_upgradeable::id() - ); - assert_eq!( - bincode::deserialize::( - fetched_builtin_program_account.data() - ) - .unwrap(), - UpgradeableLoaderState::Program { - programdata_address: builtin_program_data_address - } - ); - // The builtin's program data should be owned by the upgradeable - // loader and have the correct state. - let fetched_builtin_program_data_account = - bank.get_account(&builtin_program_data_address).unwrap(); - assert_eq!( - fetched_builtin_program_data_account.owner(), - &bpf_loader_upgradeable::id() - ); - assert_eq!( - bincode::deserialize::( - &fetched_builtin_program_data_account.data()[..program_data_metadata_size] - ) - .unwrap(), - UpgradeableLoaderState::ProgramData { - slot: 0, - upgrade_authority_address - } - ); - assert_eq!( - &fetched_builtin_program_data_account.data()[program_data_metadata_size..], - elf, - ); - }; - - // Create a new bank. - let mut bank = Bank::new_for_tests(&genesis_config); - check_builtin_is_bpf(&bank); - - // Now, add the feature ID as active, and run `finish_init` again to - // make sure the feature is idempotent. - let mut feature_set = FeatureSet::all_enabled(); - feature_set.active_mut().insert(*feature_id, 0); - bank.feature_set = Arc::new(feature_set); - bank.store_account_and_update_capitalization( - feature_id, - &feature::create_account( - &Feature { - activated_at: Some(0), - }, - 42, - ), - ); - - // Run `finish_init` to simulate starting up from a snapshot. - // Clear all builtins to simulate a fresh bank init. 
- bank.transaction_processor - .program_cache - .write() - .unwrap() - .remove_programs( - bank.transaction_processor - .builtin_program_ids - .read() - .unwrap() - .clone() - .into_iter(), - ); - bank.transaction_processor - .builtin_program_ids - .write() - .unwrap() - .clear(); - bank.finish_init(&genesis_config, None, false); - - check_builtin_is_bpf(&bank); - } -} diff --git a/runtime/src/bank/builtins/core_bpf_migration/mod.rs b/runtime/src/bank/builtins/core_bpf_migration/mod.rs index c498567c3febce..f8d753d5d90f57 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/mod.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/mod.rs @@ -137,7 +137,11 @@ impl Bank { let mut program_cache_for_tx_batch = ProgramCacheForTxBatch::new_from_cache( self.slot, self.epoch, - &self.transaction_processor.program_cache.read().unwrap(), + &self + .transaction_processor + .global_program_cache + .read() + .unwrap(), ); // Configure a dummy `InvokeContext` from the runtime's current @@ -147,7 +151,7 @@ impl Bank { let compute_budget = self .compute_budget() .unwrap_or(ComputeBudget::new_with_defaults( - /* simd_0296_active */ false, + /* simd_0268_active */ false, )); let mut sysvar_cache = SysvarCache::default(); sysvar_cache.fill_missing_entries(|pubkey, set_sysvar| { @@ -206,7 +210,7 @@ impl Bank { // Update the program cache by merging with `programs_modified`, which // should have been updated by the deploy function. self.transaction_processor - .program_cache + .global_program_cache .write() .unwrap() .merge(&program_cache_for_tx_batch.drain_modified_entries()); @@ -297,7 +301,7 @@ impl Bank { /// Upgrade a Core BPF program. /// To use this function, add a feature-gated callsite to bank's - /// `apply_feature_activations` function, similar to below. + /// `apply_new_feature_activations` function, similar to below. 
/// /// ```ignore /// if new_feature_activations.contains(&agave_feature_set::test_upgrade_program::id()) { @@ -394,15 +398,37 @@ impl Bank { pub(crate) mod tests { use { super::*, - crate::bank::tests::create_simple_test_bank, + crate::bank::{ + test_utils::goto_end_of_slot, + tests::{ + create_genesis_config, create_simple_test_bank, + new_bank_from_parent_with_bank_forks, + }, + Bank, + }, + agave_feature_set::FeatureSet, assert_matches::assert_matches, - solana_account::state_traits::StateMut, - solana_builtins::core_bpf_migration::CoreBpfMigrationTargetType, + solana_account::{ + state_traits::StateMut, AccountSharedData, ReadableAccount, WritableAccount, + }, + solana_builtins::{ + core_bpf_migration::{CoreBpfMigrationConfig, CoreBpfMigrationTargetType}, + prototype::{BuiltinPrototype, StatelessBuiltinPrototype}, + BUILTINS, + }, solana_clock::Slot, - solana_loader_v3_interface::get_program_data_address, + solana_epoch_schedule::EpochSchedule, + solana_feature_gate_interface::{self as feature, Feature}, + solana_instruction::{AccountMeta, Instruction}, + solana_loader_v3_interface::{get_program_data_address, state::UpgradeableLoaderState}, + solana_message::Message, + solana_native_token::LAMPORTS_PER_SOL, solana_program_runtime::loaded_programs::{ProgramCacheEntry, ProgramCacheEntryType}, + solana_pubkey::Pubkey, solana_sdk_ids::{bpf_loader_upgradeable, native_loader}, - std::{fs::File, io::Read}, + solana_signer::Signer, + solana_transaction::Transaction, + std::{fs::File, io::Read, sync::Arc}, test_case::test_case, }; @@ -592,7 +618,11 @@ pub(crate) mod tests { .contains(&self.target_program_address)); // The cache should contain the target program. 
- let program_cache = bank.transaction_processor.program_cache.read().unwrap(); + let program_cache = bank + .transaction_processor + .global_program_cache + .read() + .unwrap(); let entries = program_cache.get_flattened_entries(true, true); let target_entry = entries .iter() @@ -625,8 +655,7 @@ pub(crate) mod tests { let account = AccountSharedData::new_data(1, &builtin_name, &native_loader::id()).unwrap(); bank.store_account_and_update_capitalization(&builtin_id, &account); - bank.transaction_processor.add_builtin( - &bank, + bank.add_builtin( builtin_id, builtin_name.as_str(), ProgramCacheEntry::default(), @@ -762,8 +791,7 @@ pub(crate) mod tests { let account = AccountSharedData::new_data(1, &builtin_name, &native_loader::id()).unwrap(); bank.store_account_and_update_capitalization(&builtin_id, &account); - bank.transaction_processor.add_builtin( - &bank, + bank.add_builtin( builtin_id, builtin_name.as_str(), ProgramCacheEntry::default(), @@ -813,8 +841,7 @@ pub(crate) mod tests { let account = AccountSharedData::new_data(1, &builtin_name, &native_loader::id()).unwrap(); bank.store_account_and_update_capitalization(&builtin_id, &account); - bank.transaction_processor.add_builtin( - &bank, + bank.add_builtin( builtin_id, builtin_name.as_str(), ProgramCacheEntry::default(), @@ -864,8 +891,7 @@ pub(crate) mod tests { let account = AccountSharedData::new_data(1, &builtin_name, &native_loader::id()).unwrap(); bank.store_account_and_update_capitalization(&builtin_id, &account); - bank.transaction_processor.add_builtin( - &bank, + bank.add_builtin( builtin_id, builtin_name.as_str(), ProgramCacheEntry::default(), @@ -1092,4 +1118,508 @@ pub(crate) mod tests { }, ); } + + // CPI mockup to test CPI to newly migrated programs. 
+ mod cpi_mockup { + use { + solana_instruction::Instruction, solana_program_runtime::declare_process_instruction, + }; + + declare_process_instruction!(Entrypoint, 0, |invoke_context| { + let transaction_context = &invoke_context.transaction_context; + let instruction_context = transaction_context.get_current_instruction_context()?; + + let target_program_id = instruction_context.get_key_of_instruction_account(0)?; + + let instruction = Instruction::new_with_bytes(*target_program_id, &[], Vec::new()); + + invoke_context.native_invoke(instruction, &[]) + }); + } + + enum TestPrototype<'a> { + Builtin(&'a BuiltinPrototype), + #[allow(unused)] + // We aren't migrating any stateless builtins right now. Uncomment if needed. + Stateless(&'a StatelessBuiltinPrototype), + } + impl<'a> TestPrototype<'a> { + fn deconstruct(&'a self) -> (&'a Pubkey, &'a CoreBpfMigrationConfig) { + match self { + Self::Builtin(prototype) => ( + &prototype.program_id, + prototype.core_bpf_migration_config.as_ref().unwrap(), + ), + Self::Stateless(prototype) => ( + &prototype.program_id, + prototype.core_bpf_migration_config.as_ref().unwrap(), + ), + } + } + } + + // This test can't be used to the `compute_budget` program, unless a valid + // `compute_budget` program is provided as the replacement (source). + // See program_runtime::compute_budget_processor::process_compute_budget_instructions`.` + // It also can't test the `bpf_loader_upgradeable` program, as it's used in + // the SVM's loader to invoke programs. + // See `solana_svm::account_loader::load_transaction_accounts`. 
+ #[test_case(TestPrototype::Builtin(&BUILTINS[0]); "system")] + #[test_case(TestPrototype::Builtin(&BUILTINS[1]); "vote")] + #[test_case(TestPrototype::Builtin(&BUILTINS[2]); "bpf_loader_deprecated")] + #[test_case(TestPrototype::Builtin(&BUILTINS[3]); "bpf_loader")] + fn test_migrate_builtin_e2e(prototype: TestPrototype) { + let (mut genesis_config, mint_keypair) = + create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let slots_per_epoch = 32; + genesis_config.epoch_schedule = + EpochSchedule::custom(slots_per_epoch, slots_per_epoch, false); + + let mut root_bank = Bank::new_for_tests(&genesis_config); + + // Set up the CPI mockup to test CPI'ing to the migrated program. + let cpi_program_id = Pubkey::new_unique(); + let cpi_program_name = "mock_cpi_program"; + root_bank.add_builtin( + cpi_program_id, + cpi_program_name, + ProgramCacheEntry::new_builtin(0, cpi_program_name.len(), cpi_mockup::Entrypoint::vm), + ); + + let (builtin_id, config) = prototype.deconstruct(); + let feature_id = &config.feature_id; + let source_buffer_address = &config.source_buffer_address; + let upgrade_authority_address = config.upgrade_authority_address; + + // Add the feature to the bank's inactive feature set. + // Note this will add the feature ID if it doesn't exist. + let mut feature_set = FeatureSet::all_enabled(); + feature_set.deactivate(feature_id); + root_bank.feature_set = Arc::new(feature_set); + + // Initialize the source buffer account. + let test_context = TestContext::new( + &root_bank, + builtin_id, + source_buffer_address, + upgrade_authority_address, + ); + + let (bank, bank_forks) = root_bank.wrap_with_bank_forks_for_tests(); + + // Advance to the next epoch without activating the feature. + let mut first_slot_in_next_epoch = slots_per_epoch + 1; + let bank = new_bank_from_parent_with_bank_forks( + &bank_forks, + bank, + &Pubkey::default(), + first_slot_in_next_epoch, + ); + + // Assert the feature was not activated and the program was not + // migrated. 
+ assert!(!bank.feature_set.is_active(feature_id)); + assert!(bank.get_account(source_buffer_address).is_some()); + + // Store the account to activate the feature. + bank.store_account_and_update_capitalization( + feature_id, + &feature::create_account(&Feature::default(), 42), + ); + + // Advance the bank to cross the epoch boundary and activate the + // feature. + goto_end_of_slot(bank.clone()); + first_slot_in_next_epoch += slots_per_epoch; + let migration_slot = first_slot_in_next_epoch; + let bank = new_bank_from_parent_with_bank_forks( + &bank_forks, + bank, + &Pubkey::default(), + first_slot_in_next_epoch, + ); + + // Run the post-migration program checks. + assert!(bank.feature_set.is_active(feature_id)); + test_context.run_program_checks(&bank, migration_slot); + + // Advance one slot so that the new BPF builtin program becomes + // effective in the program cache. + goto_end_of_slot(bank.clone()); + let next_slot = bank.slot() + 1; + let bank = + new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), next_slot); + + // Successfully invoke the new BPF builtin program. + bank.process_transaction(&Transaction::new( + &vec![&mint_keypair], + Message::new( + &[Instruction::new_with_bytes(*builtin_id, &[], Vec::new())], + Some(&mint_keypair.pubkey()), + ), + bank.last_blockhash(), + )) + .unwrap(); + + // Successfully invoke the new BPF builtin program via CPI. + bank.process_transaction(&Transaction::new( + &vec![&mint_keypair], + Message::new( + &[Instruction::new_with_bytes( + cpi_program_id, + &[], + vec![AccountMeta::new_readonly(*builtin_id, false)], + )], + Some(&mint_keypair.pubkey()), + ), + bank.last_blockhash(), + )) + .unwrap(); + + // Simulate crossing another epoch boundary for a new bank. 
+ goto_end_of_slot(bank.clone()); + first_slot_in_next_epoch += slots_per_epoch; + let bank = new_bank_from_parent_with_bank_forks( + &bank_forks, + bank, + &Pubkey::default(), + first_slot_in_next_epoch, + ); + + // Run the post-migration program checks again. + assert!(bank.feature_set.is_active(feature_id)); + test_context.run_program_checks(&bank, migration_slot); + + // Again, successfully invoke the new BPF builtin program. + bank.process_transaction(&Transaction::new( + &vec![&mint_keypair], + Message::new( + &[Instruction::new_with_bytes(*builtin_id, &[], Vec::new())], + Some(&mint_keypair.pubkey()), + ), + bank.last_blockhash(), + )) + .unwrap(); + + // Again, successfully invoke the new BPF builtin program via CPI. + bank.process_transaction(&Transaction::new( + &vec![&mint_keypair], + Message::new( + &[Instruction::new_with_bytes( + cpi_program_id, + &[], + vec![AccountMeta::new_readonly(*builtin_id, false)], + )], + Some(&mint_keypair.pubkey()), + ), + bank.last_blockhash(), + )) + .unwrap(); + } + + // Simulate a failure to migrate the program. + // Here we want to see that the bank handles the failure gracefully and + // advances to the next epoch without issue. + #[test] + fn test_migrate_builtin_e2e_failure() { + let (genesis_config, _mint_keypair) = create_genesis_config(0); + let mut root_bank = Bank::new_for_tests(&genesis_config); + + let test_prototype = TestPrototype::Builtin(&BUILTINS[0]); // System program + let (builtin_id, config) = test_prototype.deconstruct(); + let feature_id = &config.feature_id; + let source_buffer_address = &config.source_buffer_address; + let upgrade_authority_address = Some(Pubkey::new_unique()); + + // Add the feature to the bank's inactive feature set. + let mut feature_set = FeatureSet::all_enabled(); + feature_set.inactive_mut().insert(*feature_id); + root_bank.feature_set = Arc::new(feature_set); + + // Initialize the source buffer account. 
+ let _test_context = TestContext::new( + &root_bank, + builtin_id, + source_buffer_address, + upgrade_authority_address, + ); + + let (bank, bank_forks) = root_bank.wrap_with_bank_forks_for_tests(); + + // Intentionally nuke the source buffer account to force the migration + // to fail. + bank.store_account_and_update_capitalization( + source_buffer_address, + &AccountSharedData::default(), + ); + + // Activate the feature. + bank.store_account_and_update_capitalization( + feature_id, + &feature::create_account(&Feature::default(), 42), + ); + + // Advance the bank to cross the epoch boundary and activate the + // feature. + goto_end_of_slot(bank.clone()); + let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 33); + + // Assert the feature _was_ activated but the program was not migrated. + assert!(bank.feature_set.is_active(feature_id)); + assert!(bank + .transaction_processor + .builtin_program_ids + .read() + .unwrap() + .contains(builtin_id)); + assert_eq!( + bank.get_account(builtin_id).unwrap().owner(), + &native_loader::id() + ); + + // Simulate crossing an epoch boundary again. + goto_end_of_slot(bank.clone()); + let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 96); + + // Again, assert the feature is still active and the program still was + // not migrated. + assert!(bank.feature_set.is_active(feature_id)); + assert!(bank + .transaction_processor + .builtin_program_ids + .read() + .unwrap() + .contains(builtin_id)); + assert_eq!( + bank.get_account(builtin_id).unwrap().owner(), + &native_loader::id() + ); + } + + // Simulate creating a bank from a snapshot after a migration feature was + // activated, but the migration failed. + // Here we want to see that the bank recognizes the failed migration and + // adds the original builtin to the new bank. 
+ #[test] + fn test_migrate_builtin_e2e_init_after_failed_migration() { + let (genesis_config, _mint_keypair) = create_genesis_config(0); + + let test_prototype = TestPrototype::Builtin(&BUILTINS[0]); // System program + let (builtin_id, config) = test_prototype.deconstruct(); + let feature_id = &config.feature_id; + + // Since the test feature IDs aren't included in the SDK, the only way + // to simulate loading from snapshot with this feature active is to + // create a bank, overwrite the feature set with the feature active, + // then re-run the `finish_init` method. + let mut bank = Bank::new_for_tests(&genesis_config); + + // Set up the feature set with the migration feature marked as active. + let mut feature_set = FeatureSet::all_enabled(); + feature_set.active_mut().insert(*feature_id, 0); + bank.feature_set = Arc::new(feature_set); + bank.store_account_and_update_capitalization( + feature_id, + &feature::create_account( + &Feature { + activated_at: Some(0), + }, + 42, + ), + ); + + // Run `compute_and_apply_features_after_snapshot_restore` to simulate + // starting up from a snapshot. Clear all builtins to simulate a fresh + // bank init. + bank.transaction_processor + .global_program_cache + .write() + .unwrap() + .remove_programs( + bank.transaction_processor + .builtin_program_ids + .read() + .unwrap() + .clone() + .into_iter(), + ); + bank.transaction_processor + .builtin_program_ids + .write() + .unwrap() + .clear(); + bank.compute_and_apply_features_after_snapshot_restore(); + + // Assert the feature is active and the bank still added the builtin. + assert!(bank.feature_set.is_active(feature_id)); + assert!(bank + .transaction_processor + .builtin_program_ids + .read() + .unwrap() + .contains(builtin_id)); + assert_eq!( + bank.get_account(builtin_id).unwrap().owner(), + &native_loader::id() + ); + + // Simulate crossing an epoch boundary for a new bank. 
+ let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); + goto_end_of_slot(bank.clone()); + let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 33); + + // Assert the feature is active but the builtin was not migrated. + assert!(bank.feature_set.is_active(feature_id)); + assert!(bank + .transaction_processor + .builtin_program_ids + .read() + .unwrap() + .contains(builtin_id)); + assert_eq!( + bank.get_account(builtin_id).unwrap().owner(), + &native_loader::id() + ); + } + + // Simulate creating a bank from a snapshot after a migration feature was + // activated and the migration was successful. + // Here we want to see that the bank recognizes the migration and + // _does not_ add the original builtin to the new bank. + #[test] + fn test_migrate_builtin_e2e_init_after_successful_migration() { + let (mut genesis_config, _mint_keypair) = create_genesis_config(0); + + let test_prototype = TestPrototype::Builtin(&BUILTINS[0]); // System program + let (builtin_id, config) = test_prototype.deconstruct(); + let feature_id = &config.feature_id; + + let upgrade_authority_address = Some(Pubkey::new_unique()); + let elf = test_elf(); + let program_data_metadata_size = UpgradeableLoaderState::size_of_programdata_metadata(); + let program_data_size = program_data_metadata_size + elf.len(); + + // Set up a post-migration builtin. 
+ let builtin_program_data_address = get_program_data_address(builtin_id); + let builtin_program_account = AccountSharedData::new_data( + 100_000, + &UpgradeableLoaderState::Program { + programdata_address: builtin_program_data_address, + }, + &bpf_loader_upgradeable::id(), + ) + .unwrap(); + let mut builtin_program_data_account = AccountSharedData::new_data_with_space( + 100_000, + &UpgradeableLoaderState::ProgramData { + slot: 0, + upgrade_authority_address, + }, + program_data_size, + &bpf_loader_upgradeable::id(), + ) + .unwrap(); + builtin_program_data_account.data_as_mut_slice()[program_data_metadata_size..] + .copy_from_slice(&elf); + genesis_config + .accounts + .insert(*builtin_id, builtin_program_account.into()); + genesis_config.accounts.insert( + builtin_program_data_address, + builtin_program_data_account.into(), + ); + + // Use this closure to run checks on the builtin. + let check_builtin_is_bpf = |bank: &Bank| { + // The bank's transaction processor should not contain the builtin + // in its list of builtin program IDs. + assert!(!bank + .transaction_processor + .builtin_program_ids + .read() + .unwrap() + .contains(builtin_id)); + // The builtin should be owned by the upgradeable loader and have + // the correct state. + let fetched_builtin_program_account = bank.get_account(builtin_id).unwrap(); + assert_eq!( + fetched_builtin_program_account.owner(), + &bpf_loader_upgradeable::id() + ); + assert_eq!( + bincode::deserialize::( + fetched_builtin_program_account.data() + ) + .unwrap(), + UpgradeableLoaderState::Program { + programdata_address: builtin_program_data_address + } + ); + // The builtin's program data should be owned by the upgradeable + // loader and have the correct state. 
+ let fetched_builtin_program_data_account = + bank.get_account(&builtin_program_data_address).unwrap(); + assert_eq!( + fetched_builtin_program_data_account.owner(), + &bpf_loader_upgradeable::id() + ); + assert_eq!( + bincode::deserialize::( + &fetched_builtin_program_data_account.data()[..program_data_metadata_size] + ) + .unwrap(), + UpgradeableLoaderState::ProgramData { + slot: 0, + upgrade_authority_address + } + ); + assert_eq!( + &fetched_builtin_program_data_account.data()[program_data_metadata_size..], + elf, + ); + }; + + // Create a new bank. + let mut bank = Bank::new_for_tests(&genesis_config); + check_builtin_is_bpf(&bank); + + // Now, add the feature ID as active, and run `finish_init` again to + // make sure the feature is idempotent. + let mut feature_set = FeatureSet::all_enabled(); + feature_set.active_mut().insert(*feature_id, 0); + bank.feature_set = Arc::new(feature_set); + bank.store_account_and_update_capitalization( + feature_id, + &feature::create_account( + &Feature { + activated_at: Some(0), + }, + 42, + ), + ); + + // Run `compute_and_apply_features_after_snapshot_restore` to simulate + // starting up from a snapshot. Clear all builtins to simulate a fresh + // bank init. 
+ bank.transaction_processor + .global_program_cache + .write() + .unwrap() + .remove_programs( + bank.transaction_processor + .builtin_program_ids + .read() + .unwrap() + .clone() + .into_iter(), + ); + bank.transaction_processor + .builtin_program_ids + .write() + .unwrap() + .clear(); + bank.compute_and_apply_features_after_snapshot_restore(); + + check_builtin_is_bpf(&bank); + } } diff --git a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs index fdfc0747e35b78..b7c8981b81d534 100644 --- a/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs +++ b/runtime/src/bank/builtins/core_bpf_migration/target_builtin.rs @@ -71,11 +71,8 @@ impl TargetBuiltin { #[cfg(test)] mod tests { use { - super::*, - crate::bank::{tests::create_simple_test_bank, ApplyFeatureActivationsCaller}, - agave_feature_set as feature_set, - assert_matches::assert_matches, - solana_account::Account, + super::*, crate::bank::tests::create_simple_test_bank, agave_feature_set as feature_set, + assert_matches::assert_matches, solana_account::Account, solana_feature_gate_interface as feature, solana_loader_v3_interface::state::UpgradeableLoaderState, solana_sdk_ids::bpf_loader_upgradeable::ID as BPF_LOADER_UPGRADEABLE_ID, @@ -106,7 +103,6 @@ mod tests { #[test_case(solana_sdk_ids::bpf_loader_deprecated::id(), None)] #[test_case(solana_sdk_ids::bpf_loader_upgradeable::id(), None)] #[test_case(solana_compute_budget_interface::id(), None)] - #[test_case(solana_stake_interface::program::id(), None)] #[test_case(solana_system_interface::program::id(), None)] #[test_case(solana_vote_interface::program::id(), None)] #[test_case( @@ -134,7 +130,7 @@ mod tests { bank.get_minimum_balance_for_rent_exemption(feature::Feature::size_of()), ), ); - bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false); + bank.compute_and_apply_new_feature_activations(); } let program_account = 
bank.get_account_with_fixed_root(&program_address).unwrap(); diff --git a/runtime/src/bank/metrics.rs b/runtime/src/bank/metrics.rs index 1e54a385897082..e6e76639f5db8d 100644 --- a/runtime/src/bank/metrics.rs +++ b/runtime/src/bank/metrics.rs @@ -33,7 +33,6 @@ pub(crate) struct NewBankTimings { pub(crate) stakes_cache_time_us: u64, pub(crate) epoch_stakes_time_us: u64, pub(crate) builtin_program_ids_time_us: u64, - pub(crate) rewards_pool_pubkeys_time_us: u64, pub(crate) executor_cache_time_us: u64, pub(crate) transaction_debug_keys_time_us: u64, pub(crate) transaction_log_collector_config_time_us: u64, @@ -118,11 +117,6 @@ pub(crate) fn report_new_bank_metrics( timings.builtin_program_ids_time_us, i64 ), - ( - "rewards_pool_pubkeys_us", - timings.rewards_pool_pubkeys_time_us, - i64 - ), ("executor_cache_us", timings.executor_cache_time_us, i64), ( "transaction_debug_keys_us", diff --git a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs index d8a33afc4ee9bb..5835fefed4efd9 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/calculation.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/calculation.rs @@ -3,13 +3,13 @@ use { epoch_rewards_hasher::hash_rewards_into_partitions, Bank, CalculateRewardsAndDistributeVoteRewardsResult, CalculateValidatorRewardsResult, EpochRewardCalculateParamInfo, PartitionedRewardsCalculation, PartitionedStakeReward, - StakeRewardCalculation, VoteRewardsAccounts, VoteRewardsAccountsStorable, - REWARD_CALCULATION_NUM_BLOCKS, + PartitionedStakeRewards, StakeRewardCalculation, VoteRewardsAccounts, + VoteRewardsAccountsStorable, REWARD_CALCULATION_NUM_BLOCKS, }, crate::{ bank::{ - PrevEpochInflationRewards, RewardCalcTracer, RewardCalculationEvent, RewardsMetrics, - VoteReward, VoteRewards, + null_tracer, PrevEpochInflationRewards, RewardCalcTracer, RewardCalculationEvent, + RewardsMetrics, VoteReward, VoteRewards, }, inflation_rewards::{ points::{calculate_points, 
PointValue}, @@ -18,27 +18,84 @@ use { stake_account::StakeAccount, stakes::Stakes, }, - ahash::random_state::RandomState as AHashRandomState, - dashmap::DashMap, log::{debug, info}, rayon::{ - iter::{IntoParallelRefIterator, ParallelIterator}, + iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}, ThreadPool, }, - solana_account::ReadableAccount, solana_clock::{Epoch, Slot}, - solana_measure::measure_us, + solana_measure::{measure::Measure, measure_us}, solana_pubkey::Pubkey, - solana_stake_interface::state::Delegation, + solana_stake_interface::{stake_history::StakeHistory, state::Delegation}, solana_sysvar::epoch_rewards::EpochRewards, - solana_vote::vote_account::VoteAccount, - solana_vote_program::vote_state::VoteStateVersions, - std::sync::{ - atomic::{AtomicU64, Ordering::Relaxed}, - Arc, + solana_vote::vote_account::VoteAccounts, + std::{ + ops::Add, + sync::{atomic::Ordering::Relaxed, Arc}, }, }; +#[derive(Debug)] +struct DelegationRewards { + stake_reward: PartitionedStakeReward, + vote_pubkey: Pubkey, + vote_reward: VoteReward, +} + +#[derive(Default)] +struct RewardsAccumulator { + vote_rewards: VoteRewards, + num_stake_rewards: usize, + total_stake_rewards_lamports: u64, +} + +impl RewardsAccumulator { + fn add_reward(&mut self, vote_pubkey: Pubkey, vote_reward: VoteReward, stakers_reward: u64) { + self.vote_rewards + .entry(vote_pubkey) + .and_modify(|dst_vote_reward| { + dst_vote_reward.vote_rewards = dst_vote_reward + .vote_rewards + .saturating_add(vote_reward.vote_rewards) + }) + .or_insert(vote_reward); + self.num_stake_rewards = self.num_stake_rewards.saturating_add(1); + self.total_stake_rewards_lamports = self + .total_stake_rewards_lamports + .saturating_add(stakers_reward); + } +} + +impl Add for RewardsAccumulator { + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + // Check which instance has more vote rewards. Treat the bigger one + // as a destination, which is going to be extended. 
This way we make + // the reallocation as small as possible. + let (mut dst, src) = if self.vote_rewards.len() >= rhs.vote_rewards.len() { + (self, rhs) + } else { + (rhs, self) + }; + for (vote_pubkey, vote_reward) in src.vote_rewards { + dst.vote_rewards + .entry(vote_pubkey) + .and_modify(|dst_vote_reward: &mut VoteReward| { + dst_vote_reward.vote_rewards = dst_vote_reward + .vote_rewards + .saturating_add(vote_reward.vote_rewards) + }) + .or_insert(vote_reward); + } + dst.num_stake_rewards = dst.num_stake_rewards.saturating_add(src.num_stake_rewards); + dst.total_stake_rewards_lamports = dst + .total_stake_rewards_lamports + .saturating_add(src.total_stake_rewards_lamports); + dst + } +} + impl Bank { /// Begin the process of calculating and distributing rewards. /// This process can take multiple slots. @@ -119,17 +176,12 @@ impl Bank { let rewards_calculation = epoch_rewards_calculation_cache .entry(self.parent_hash) .or_insert_with(|| { - let calculation = self.calculate_rewards_for_partitioning( + Arc::new(self.calculate_rewards_for_partitioning( prev_epoch, reward_calc_tracer, thread_pool, metrics, - ); - info!( - "calculated rewards for epoch: {}, parent_slot: {}, parent_hash: {}", - self.epoch, self.parent_slot, self.parent_hash - ); - Arc::new(calculation) + )) }) .clone(); drop(epoch_rewards_calculation_cache); @@ -146,9 +198,7 @@ impl Bank { let total_vote_rewards = vote_account_rewards.total_vote_rewards_lamports; self.store_vote_accounts_partitioned(vote_account_rewards, metrics); - - // update reward history of JUST vote_rewards, stake_rewards is vec![] here - self.update_reward_history(vec![], vote_account_rewards); + self.update_vote_rewards(vote_account_rewards); let StakeRewardCalculation { stake_rewards, @@ -253,6 +303,11 @@ impl Bank { ) .unwrap_or_default(); + info!( + "calculated rewards for epoch: {}, parent_slot: {}, parent_hash: {}", + self.epoch, self.parent_slot, self.parent_hash + ); + PartitionedRewardsCalculation { 
vote_account_rewards, stake_rewards, @@ -311,8 +366,12 @@ impl Bank { // Use `EpochStakes` for vote accounts let leader_schedule_epoch = self.epoch_schedule().get_leader_schedule_epoch(self.slot()); - let cached_vote_accounts = self.epoch_stakes(leader_schedule_epoch) - .expect("calculation should always run after Bank::update_epoch_stakes(leader_schedule_epoch)") + let cached_vote_accounts = self + .epoch_stakes(leader_schedule_epoch) + .expect( + "calculation should always run after \ + Bank::update_epoch_stakes(leader_schedule_epoch)", + ) .stakes() .vote_accounts(); @@ -323,6 +382,70 @@ impl Bank { } } + fn redeem_delegation_rewards( + &self, + rewarded_epoch: Epoch, + stake_pubkey: &Pubkey, + stake_account: &StakeAccount, + point_value: &PointValue, + stake_history: &StakeHistory, + cached_vote_accounts: &VoteAccounts, + reward_calc_tracer: Option, + new_rate_activation_epoch: Option, + ) -> Option { + // curry closure to add the contextual stake_pubkey + let reward_calc_tracer = reward_calc_tracer.as_ref().map(|outer| { + // inner + move |inner_event: &_| { + outer(&RewardCalculationEvent::Staking(stake_pubkey, inner_event)) + } + }); + + let stake_pubkey = *stake_pubkey; + let vote_pubkey = stake_account.delegation().voter_pubkey; + let Some(vote_account) = cached_vote_accounts.get(&vote_pubkey) else { + debug!("could not find vote account {vote_pubkey} in cache"); + return None; + }; + let vote_state = vote_account.vote_state_view(); + let stake_state = stake_account.stake_state(); + + match redeem_rewards( + rewarded_epoch, + stake_state, + vote_state, + point_value, + stake_history, + reward_calc_tracer, + new_rate_activation_epoch, + ) { + Ok((stake_reward, vote_rewards, stake)) => { + let commission = vote_state.commission(); + let stake_reward = PartitionedStakeReward { + stake_pubkey, + stake, + stake_reward, + commission, + }; + let vote_account = vote_account.into(); + let vote_reward = VoteReward { + commission, + vote_account, + vote_rewards, + }; 
+ Some(DelegationRewards { + stake_reward, + vote_pubkey, + vote_reward, + }) + } + Err(e) => { + debug!("redeem_rewards() failed for {stake_pubkey}: {e:?}"); + None + } + } + } + /// Calculates epoch rewards for stake/vote accounts /// Returns vote rewards, stake rewards, and the sum of all stake rewards in lamports fn calculate_stake_vote_rewards( @@ -341,106 +464,90 @@ impl Bank { } = reward_calculate_params; let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); - let estimated_num_vote_accounts = cached_vote_accounts.len(); - let vote_account_rewards: VoteRewards = DashMap::with_capacity_and_hasher_and_shard_amount( - estimated_num_vote_accounts, - AHashRandomState::default(), - 1024, // shard amount - ); - let total_stake_rewards = AtomicU64::default(); - const ASSERT_STAKE_CACHE: bool = false; // Turn this on to assert that all vote accounts are in the cache - let (stake_rewards, measure_stake_rewards_us) = measure_us!(thread_pool.install(|| { + let mut measure_redeem_rewards = Measure::start("redeem-rewards"); + // For N stake delegations, where N is >1,000,000, we produce: + // * N stake rewards, + // * M vote rewards, where M is a number of stake nodes. Currently, way + // smaller number than 1,000,000. And we can expect it to always be + // significantly smaller than number of delegations. + // + // Producing the stake reward with rayon triggers a lot of + // (re)allocations. To avoid that, we allocate it at the start and + // pass `stake_rewards.spare_capacity_mut()` as one of iterators. 
+ let mut stake_rewards = PartitionedStakeRewards::with_capacity(stake_delegations.len()); + let rewards_accumulator: RewardsAccumulator = thread_pool.install(|| { stake_delegations .par_iter() - .filter_map(|(stake_pubkey, stake_account)| { - // curry closure to add the contextual stake_pubkey - let reward_calc_tracer = reward_calc_tracer.as_ref().map(|outer| { - // inner - move |inner_event: &_| { - outer(&RewardCalculationEvent::Staking(stake_pubkey, inner_event)) - } - }); - - let stake_pubkey = **stake_pubkey; - let vote_pubkey = stake_account.delegation().voter_pubkey; - let vote_account_from_cache = cached_vote_accounts.get(&vote_pubkey); - if ASSERT_STAKE_CACHE && vote_account_from_cache.is_none() { - let account_from_db = self.get_account_with_fixed_root(&vote_pubkey); - if let Some(account_from_db) = account_from_db { - if VoteStateVersions::is_correct_size_and_initialized( - account_from_db.data(), - ) && VoteAccount::try_from(account_from_db.clone()).is_ok() - { - panic!( - "Vote account {} not found in cache, but found in db: {:?}", - vote_pubkey, account_from_db - ); - } - } - } - let vote_account = vote_account_from_cache?; - let vote_state_view = vote_account.vote_state_view(); - let mut stake_state = *stake_account.stake_state(); - - let redeemed = redeem_rewards( + .zip_eq(stake_rewards.spare_capacity_mut()) + .with_min_len(500) + .filter_map(|((stake_pubkey, stake_account), stake_reward_ref)| { + let maybe_reward_record = self.redeem_delegation_rewards( rewarded_epoch, - &mut stake_state, - vote_state_view, + stake_pubkey, + stake_account, &point_value, stake_history, + cached_vote_accounts, reward_calc_tracer.as_ref(), new_warmup_cooldown_rate_epoch, ); - - if let Ok((stakers_reward, voters_reward)) = redeemed { - let commission = vote_state_view.commission(); - - // track voter rewards - let mut voters_reward_entry = vote_account_rewards - .entry(vote_pubkey) - .or_insert(VoteReward { - commission, - vote_account: vote_account.into(), - 
vote_rewards: 0, - }); - - voters_reward_entry.vote_rewards = voters_reward_entry - .vote_rewards - .saturating_add(voters_reward); - - total_stake_rewards.fetch_add(stakers_reward, Relaxed); - - // Safe to unwrap because all stake_delegations are type - // StakeAccount, which will always only wrap - // a `StakeStateV2::Stake` variant. - let stake = stake_state.stake().unwrap(); - return Some(PartitionedStakeReward { - stake_pubkey, - stake_reward: stakers_reward, - stake, - commission, - }); - } else { - debug!( - "redeem_rewards() failed for {}: {:?}", - stake_pubkey, redeemed - ); - } - None + let (stake_reward, maybe_reward_record) = match maybe_reward_record { + Some(res) => { + let DelegationRewards { + stake_reward, + vote_pubkey, + vote_reward, + } = res; + let stakers_reward = stake_reward.stake_reward; + ( + Some(stake_reward), + Some((stakers_reward, vote_pubkey, vote_reward)), + ) + } + None => (None, None), + }; + // It's important that for every stake delegation, we write + // a value to the cell of the stake rewards vector, + // regardless of whether it's `Some` or `None` variant. + // This allows us to pre-allocate the vector with the known + // size and avoid re-allocations, which were the bottleneck + // in this path. 
+ stake_reward_ref.write(stake_reward); + maybe_reward_record }) - .collect() - })); - let (vote_rewards, measure_vote_rewards_us) = - measure_us!(Self::calc_vote_accounts_to_store(vote_account_rewards)); - - metrics.redeem_rewards_us += measure_stake_rewards_us + measure_vote_rewards_us; + .fold( + RewardsAccumulator::default, + |mut rewards_accumulator, (stake_reward, vote_pubkey, vote_reward)| { + rewards_accumulator.add_reward(vote_pubkey, vote_reward, stake_reward); + rewards_accumulator + }, + ) + .reduce( + RewardsAccumulator::default, + |rewards_accumulator_a, rewards_accumulator_b| { + rewards_accumulator_a + rewards_accumulator_b + }, + ) + }); + let RewardsAccumulator { + vote_rewards, + num_stake_rewards, + total_stake_rewards_lamports, + } = rewards_accumulator; + // SAFETY: We initialized all the `stake_rewards` elements up to the capacity. + unsafe { + stake_rewards.assume_init(num_stake_rewards); + } + let vote_rewards = Self::calc_vote_accounts_to_store(vote_rewards); + measure_redeem_rewards.stop(); + metrics.redeem_rewards_us = measure_redeem_rewards.as_us(); ( vote_rewards, StakeRewardCalculation { stake_rewards: Arc::new(stake_rewards), - total_stake_rewards_lamports: total_stake_rewards.load(Relaxed), + total_stake_rewards_lamports, }, ) } @@ -490,22 +597,22 @@ impl Bank { (points > 0).then_some(PointValue { rewards, points }) } - /// If rewards are active, recalculates partitioned stake rewards and stores - /// a new Bank::epoch_reward_status. This method assumes that vote rewards + /// If rewards are still active, recalculates partitioned stake rewards and + /// updates Bank::epoch_reward_status. 
This method assumes that vote rewards /// have already been calculated and delivered, and *only* recalculates /// stake rewards - pub(in crate::bank) fn recalculate_partitioned_rewards( + pub(in crate::bank) fn recalculate_partitioned_rewards_if_active( &mut self, - reward_calc_tracer: Option, - thread_pool: &ThreadPool, - ) { + thread_pool_builder: F, + ) where + F: FnOnce() -> TP, + TP: std::borrow::Borrow, + { let epoch_rewards_sysvar = self.get_epoch_rewards_sysvar(); if epoch_rewards_sysvar.active { - let (stake_rewards, partition_indices) = self.recalculate_stake_rewards( - &epoch_rewards_sysvar, - reward_calc_tracer, - thread_pool, - ); + let thread_pool = thread_pool_builder(); + let (stake_rewards, partition_indices) = + self.recalculate_stake_rewards(&epoch_rewards_sysvar, thread_pool.borrow()); self.set_epoch_reward_status_distribution( epoch_rewards_sysvar.distribution_starting_block_height, stake_rewards, @@ -520,9 +627,8 @@ impl Bank { fn recalculate_stake_rewards( &self, epoch_rewards_sysvar: &EpochRewards, - reward_calc_tracer: Option, thread_pool: &ThreadPool, - ) -> (Arc>, Vec>) { + ) -> (Arc, Vec>) { assert!(epoch_rewards_sysvar.active); // If rewards are active, the rewarded epoch is always the immediately // preceding epoch. 
@@ -546,7 +652,7 @@ impl Bank { rewarded_epoch, point_value, thread_pool, - reward_calc_tracer, + null_tracer(), &mut RewardsMetrics::default(), // This is required, but not reporting anything at the moment ); drop(stakes); @@ -581,12 +687,15 @@ mod tests { stake_account::StakeAccount, stakes::Stakes, }, + agave_feature_set::FeatureSet, rayon::ThreadPoolBuilder, solana_account::{accounts_equal, state_traits::StateMut, ReadableAccount}, - solana_native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, + solana_accounts_db::partitioned_rewards::PartitionedEpochRewardsConfig, + solana_native_token::LAMPORTS_PER_SOL, solana_reward_info::RewardType, solana_stake_interface::state::{Delegation, StakeStateV2}, - solana_vote_interface::state::VoteState, + solana_vote_interface::state::VoteStateV3, + solana_vote_program::vote_state, std::sync::{Arc, RwLockReadGuard}, }; @@ -669,10 +778,21 @@ mod tests { fn test_rewards_computation() { solana_logger::setup(); - let expected_num_delegations = 100; - let bank = create_default_reward_bank(expected_num_delegations, SLOTS_PER_EPOCH) - .0 - .bank; + // Delegations with sufficient stake to get rewards (2 SOL). + let delegations_with_rewards = 100; + // Delegations with insufficient stake (0.5 SOL). 
+ let delegations_without_rewards = 10; + let stakes = (0..delegations_with_rewards) + .map(|_| 2_000_000_000) + .chain((0..delegations_without_rewards).map(|_| 500_000_000)) + .collect::>(); + let bank = create_reward_bank_with_specific_stakes( + stakes, + PartitionedEpochRewardsConfig::default().stake_account_stores_per_block, + SLOTS_PER_EPOCH, + ) + .0 + .bank; // Calculate rewards let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); @@ -706,7 +826,10 @@ mod tests { ); // assert that number of stake rewards matches - assert_eq!(stake_rewards.stake_rewards.len(), expected_num_delegations); + assert_eq!( + stake_rewards.stake_rewards.num_rewards(), + delegations_with_rewards + ); } #[test] @@ -741,7 +864,7 @@ mod tests { solana_logger::setup(); // bank with no rewards to distribute - let (genesis_config, _mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); + let (genesis_config, _mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let bank = Bank::new_for_tests(&genesis_config); let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); @@ -803,7 +926,7 @@ mod tests { .load_slow_with_fixed_root(&bank.ancestors, vote_pubkey) .unwrap() .0; - let vote_state = VoteState::deserialize(vote_account.data()).unwrap(); + let vote_state = VoteStateV3::deserialize(vote_account.data()).unwrap(); assert_eq!( vote_rewards_accounts.accounts_with_rewards.len(), @@ -827,7 +950,7 @@ mod tests { ); assert_eq!(vote_pubkey_from_result, vote_pubkey); - assert_eq!(stake_reward_calculation.stake_rewards.len(), 1); + assert_eq!(stake_reward_calculation.stake_rewards.num_rewards(), 1); let expected_reward = { let stake_reward = 8_400_000_000_000; let stake_state: StakeStateV2 = stake_account.state().unwrap(); @@ -841,7 +964,15 @@ mod tests { commission, } }; - assert_eq!(stake_reward_calculation.stake_rewards[0], expected_reward); + assert_eq!( + stake_reward_calculation + .stake_rewards + .get(0) + .unwrap() + .as_ref() + .unwrap(), + 
&expected_reward + ); } fn compare_stake_rewards( @@ -850,10 +981,7 @@ mod tests { ) { for (i, partition) in received_stake_rewards.iter().enumerate() { let expected_partition = &expected_stake_rewards[i]; - assert_eq!(partition.len(), expected_partition.len()); - for reward in partition { - assert!(expected_partition.iter().any(|x| x == reward)); - } + assert_eq!(partition, expected_partition); } } @@ -887,7 +1015,7 @@ mod tests { let epoch_rewards_sysvar = bank.get_epoch_rewards_sysvar(); let (recalculated_rewards, recalculated_partition_indices) = - bank.recalculate_stake_rewards(&epoch_rewards_sysvar, null_tracer(), &thread_pool); + bank.recalculate_stake_rewards(&epoch_rewards_sysvar, &thread_pool); let recalculated_rewards = build_partitioned_stake_rewards(&recalculated_rewards, &recalculated_partition_indices); @@ -915,7 +1043,7 @@ mod tests { let epoch_rewards_sysvar = bank.get_epoch_rewards_sysvar(); let (recalculated_rewards, recalculated_partition_indices) = - bank.recalculate_stake_rewards(&epoch_rewards_sysvar, null_tracer(), &thread_pool); + bank.recalculate_stake_rewards(&epoch_rewards_sysvar, &thread_pool); // Note that recalculated rewards are **NOT** the same as expected // rewards, which were calculated before any distribution. 
This is @@ -933,7 +1061,7 @@ mod tests { ); // First partition has already been distributed, so recalculation // returns 0 rewards - assert_eq!(recalculated_rewards[0].len(), 0); + assert_eq!(recalculated_rewards[0].num_rewards(), 0); let starting_index = (bank.block_height() + 1 - epoch_rewards_sysvar.distribution_starting_block_height) as usize; @@ -990,7 +1118,7 @@ mod tests { build_partitioned_stake_rewards(&expected_stake_rewards, &expected_partition_indices); let (recalculated_rewards, recalculated_partition_indices) = - bank.recalculate_stake_rewards(&epoch_rewards_sysvar, null_tracer(), &thread_pool); + bank.recalculate_stake_rewards(&epoch_rewards_sysvar, &thread_pool); let recalculated_rewards = build_partitioned_stake_rewards(&recalculated_rewards, &recalculated_partition_indices); @@ -1005,7 +1133,7 @@ mod tests { assert!(!epoch_rewards_sysvar.active); // Should panic let _recalculated_rewards = - bank.recalculate_stake_rewards(&epoch_rewards_sysvar, null_tracer(), &thread_pool); + bank.recalculate_stake_rewards(&epoch_rewards_sysvar, &thread_pool); } #[test] @@ -1046,7 +1174,7 @@ mod tests { &mut rewards_metrics, ); - bank.recalculate_partitioned_rewards(null_tracer(), &thread_pool); + bank.recalculate_partitioned_rewards_if_active(|| &thread_pool); let EpochRewardStatus::Active(EpochRewardPhase::Distribution( StartBlockHeightAndPartitionedRewards { distribution_starting_block_height, @@ -1084,7 +1212,7 @@ mod tests { let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), SLOTS_PER_EPOCH + 1); - bank.recalculate_partitioned_rewards(null_tracer(), &thread_pool); + bank.recalculate_partitioned_rewards_if_active(|| &thread_pool); let EpochRewardStatus::Active(EpochRewardPhase::Distribution( StartBlockHeightAndPartitionedRewards { distribution_starting_block_height, @@ -1113,7 +1241,7 @@ mod tests { assert_eq!(expected_stake_rewards.len(), recalculated_rewards.len()); // First partition has already been distributed, so recalculation // 
returns 0 rewards - assert_eq!(recalculated_rewards[0].len(), 0); + assert_eq!(recalculated_rewards[0].num_rewards(), 0); let epoch_rewards_sysvar = bank.get_epoch_rewards_sysvar(); let starting_index = (bank.block_height() + 1 - epoch_rewards_sysvar.distribution_starting_block_height) @@ -1126,8 +1254,146 @@ mod tests { // Advance to last distribution slot let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), SLOTS_PER_EPOCH + 2); - - bank.recalculate_partitioned_rewards(null_tracer(), &thread_pool); + bank.recalculate_partitioned_rewards_if_active(|| &thread_pool); assert_eq!(bank.epoch_reward_status, EpochRewardStatus::Inactive); } + + #[test] + fn test_initialize_after_snapshot_restore() { + let expected_num_stake_rewards = 3; + let num_rewards_per_block = 2; + // Distribute 4 rewards over 2 blocks + let stakes = vec![ + 100_000_000, // under min delegation + 2_000_000_000, // valid delegation + 3_000_000_000, // valid delegation + 4_000_000_000, // valid delegation + ]; + let (RewardBank { bank, .. 
}, _) = create_reward_bank_with_specific_stakes( + stakes, + num_rewards_per_block, + SLOTS_PER_EPOCH - 1, + ); + + // Advance to next epoch boundary + let new_slot = bank.slot() + 1; + let mut bank = Bank::new_from_parent(bank, &Pubkey::default(), new_slot); + + let EpochRewardStatus::Active(EpochRewardPhase::Calculation(calculation_status)) = + bank.epoch_reward_status.clone() + else { + panic!("{:?} not active calculation", bank.epoch_reward_status); + }; + + // Reset feature set to default, to simulate snapshot restore + bank.feature_set = Arc::new(FeatureSet::default()); + + // Run post snapshot restore initialization which should first apply + // active features and then recalculate rewards + let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); + bank.initialize_after_snapshot_restore(|| &thread_pool); + + let EpochRewardStatus::Active(EpochRewardPhase::Distribution(distribution_status)) = + bank.epoch_reward_status.clone() + else { + panic!("{:?} not active distribution", bank.epoch_reward_status); + }; + + assert_eq!( + calculation_status.all_stake_rewards, + distribution_status.all_stake_rewards + ); + assert_eq!( + calculation_status.distribution_starting_block_height, + distribution_status.distribution_starting_block_height + ); + assert_eq!( + calculation_status.all_stake_rewards.num_rewards(), + expected_num_stake_rewards + ); + } + + #[test] + fn test_reward_accumulator() { + let mut accumulator1 = RewardsAccumulator::default(); + let mut accumulator2 = RewardsAccumulator::default(); + + let vote_pubkey_a = Pubkey::new_unique(); + let vote_account_a = + vote_state::create_account(&vote_pubkey_a, &Pubkey::new_unique(), 20, 100); + let vote_pubkey_b = Pubkey::new_unique(); + let vote_account_b = + vote_state::create_account(&vote_pubkey_b, &Pubkey::new_unique(), 20, 100); + let vote_pubkey_c = Pubkey::new_unique(); + let vote_account_c = + vote_state::create_account(&vote_pubkey_c, &Pubkey::new_unique(), 20, 100); + + 
accumulator1.add_reward( + vote_pubkey_a, + VoteReward { + vote_account: vote_account_a.clone(), + commission: 10, + vote_rewards: 50, + }, + 50, + ); + accumulator1.add_reward( + vote_pubkey_b, + VoteReward { + vote_account: vote_account_b.clone(), + commission: 10, + vote_rewards: 50, + }, + 50, + ); + accumulator2.add_reward( + vote_pubkey_b, + VoteReward { + vote_account: vote_account_b, + commission: 10, + vote_rewards: 30, + }, + 30, + ); + accumulator2.add_reward( + vote_pubkey_c, + VoteReward { + vote_account: vote_account_c, + commission: 10, + vote_rewards: 50, + }, + 50, + ); + + assert_eq!(accumulator1.num_stake_rewards, 2); + assert_eq!(accumulator1.total_stake_rewards_lamports, 100); + let vote_reward_a_1 = accumulator1.vote_rewards.get(&vote_pubkey_a).unwrap(); + assert_eq!(vote_reward_a_1.commission, 10); + assert_eq!(vote_reward_a_1.vote_rewards, 50); + let vote_reward_b_1 = accumulator1.vote_rewards.get(&vote_pubkey_b).unwrap(); + assert_eq!(vote_reward_b_1.commission, 10); + assert_eq!(vote_reward_b_1.vote_rewards, 50); + + let vote_reward_b_2 = accumulator2.vote_rewards.get(&vote_pubkey_b).unwrap(); + assert_eq!(vote_reward_b_2.commission, 10); + assert_eq!(vote_reward_b_2.vote_rewards, 30); + let vote_reward_c_2 = accumulator2.vote_rewards.get(&vote_pubkey_c).unwrap(); + assert_eq!(vote_reward_c_2.commission, 10); + assert_eq!(vote_reward_c_2.vote_rewards, 50); + + let accumulator = accumulator1 + accumulator2; + + assert_eq!(accumulator.num_stake_rewards, 4); + assert_eq!(accumulator.total_stake_rewards_lamports, 180); + let vote_reward_a = accumulator.vote_rewards.get(&vote_pubkey_a).unwrap(); + assert_eq!(vote_reward_a.commission, 10); + assert_eq!(vote_reward_a.vote_rewards, 50); + let vote_reward_b = accumulator.vote_rewards.get(&vote_pubkey_b).unwrap(); + assert_eq!(vote_reward_b.commission, 10); + // sum of the vote rewards from both accumulators + assert_eq!(vote_reward_b.vote_rewards, 80); + let vote_reward_c = 
accumulator.vote_rewards.get(&vote_pubkey_c).unwrap(); + assert_eq!(vote_reward_c.commission, 10); + assert_eq!(vote_reward_c.vote_rewards, 50); + } } diff --git a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs index a2a635db2377e6..ed64d110eb10c3 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/distribution.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/distribution.rs @@ -93,7 +93,8 @@ impl Bank { else { // We should never get here. unreachable!( - "epoch rewards status is not in distribution phase, but we are trying to distribute rewards" + "epoch rewards status is not in distribution phase, but we are trying to \ + distribute rewards" ); }; @@ -162,7 +163,7 @@ impl Bank { let metrics = RewardsStoreMetrics { pre_capitalization, post_capitalization: self.capitalization(), - total_stake_accounts_count: partition_rewards.all_stake_rewards.len(), + total_stake_accounts_count: partition_rewards.all_stake_rewards.num_rewards(), total_num_partitions: partition_rewards.partition_indices.len(), partition_index, store_stake_accounts_us, @@ -268,8 +269,12 @@ impl Bank { .unwrap_or_else(|| { panic!( "partition reward out of bound: {index} >= {}", - partition_rewards.all_stake_rewards.len() + partition_rewards.all_stake_rewards.total_len() ) + }) + .as_ref() + .unwrap_or_else(|| { + panic!("partition reward {index} is empty"); }); let stake_pubkey = partitioned_stake_reward.stake_pubkey; let reward_amount = partitioned_stake_reward.stake_reward; @@ -306,7 +311,7 @@ mod tests { bank::{ partitioned_epoch_rewards::{ epoch_rewards_hasher::hash_rewards_into_partitions, tests::convert_rewards, - REWARD_CALCULATION_NUM_BLOCKS, + PartitionedStakeRewards, REWARD_CALCULATION_NUM_BLOCKS, }, tests::create_genesis_config, }, @@ -338,8 +343,8 @@ mod tests { let expected_num = 100; let stake_rewards = (0..expected_num) - .map(|_| PartitionedStakeReward::new_random()) - .collect::>(); + .map(|_| 
Some(PartitionedStakeReward::new_random())) + .collect::(); let partition_indices = hash_rewards_into_partitions(&stake_rewards, &Hash::new_from_array([1; 32]), 2); @@ -362,8 +367,8 @@ mod tests { let expected_num = 1; let stake_rewards = (0..expected_num) - .map(|_| PartitionedStakeReward::new_random()) - .collect::>(); + .map(|_| Some(PartitionedStakeReward::new_random())) + .collect::(); let partition_indices = hash_rewards_into_partitions( &stake_rewards, @@ -387,7 +392,7 @@ mod tests { bank.set_epoch_reward_status_distribution( bank.block_height() + REWARD_CALCULATION_NUM_BLOCKS, - Arc::new(vec![]), + Arc::new(PartitionedStakeRewards::default()), vec![], ); @@ -744,11 +749,11 @@ mod tests { .map(|_| StakeReward::new_random()) .collect::>(); populate_starting_stake_accounts_from_stake_rewards(&bank, &stake_rewards); - let converted_rewards: Vec<_> = convert_rewards(stake_rewards); + let converted_rewards = convert_rewards(stake_rewards); let expected_total = converted_rewards - .iter() - .map(|stake_reward| stake_reward.stake_reward) + .enumerated_rewards_iter() + .map(|(_, stake_reward)| stake_reward.stake_reward) .sum::(); let partitioned_rewards = StartBlockHeightAndPartitionedRewards { @@ -771,7 +776,7 @@ mod tests { let partitioned_rewards = StartBlockHeightAndPartitionedRewards { distribution_starting_block_height: bank.block_height() + REWARD_CALCULATION_NUM_BLOCKS, - all_stake_rewards: Arc::new(vec![]), + all_stake_rewards: Arc::new(PartitionedStakeRewards::default()), partition_indices: vec![vec![]], }; diff --git a/runtime/src/bank/partitioned_epoch_rewards/epoch_rewards_hasher.rs b/runtime/src/bank/partitioned_epoch_rewards/epoch_rewards_hasher.rs index df81921afe9a88..e834e217362a8a 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/epoch_rewards_hasher.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/epoch_rewards_hasher.rs @@ -1,17 +1,17 @@ use { - crate::bank::partitioned_epoch_rewards::PartitionedStakeReward, itertools::enumerate, + 
crate::bank::partitioned_epoch_rewards::PartitionedStakeRewards, solana_epoch_rewards_hasher::EpochRewardsHasher, solana_hash::Hash, }; pub(in crate::bank::partitioned_epoch_rewards) fn hash_rewards_into_partitions( - stake_rewards: &[PartitionedStakeReward], + stake_rewards: &PartitionedStakeRewards, parent_blockhash: &Hash, num_partitions: usize, ) -> Vec> { let hasher = EpochRewardsHasher::new(num_partitions, parent_blockhash); let mut indices = vec![vec![]; num_partitions]; - for (i, reward) in enumerate(stake_rewards) { + for (i, reward) in stake_rewards.enumerated_rewards_iter() { // clone here so the hasher's state is re-used on each call to `hash_address_to_partition`. // This prevents us from re-hashing the seed each time. // The clone is explicit (as opposed to an implicit copy) so it is clear this is intended. @@ -43,8 +43,8 @@ mod tests { let expected_num = 12345; let stake_rewards = (0..expected_num) - .map(|_| PartitionedStakeReward::new_random()) - .collect::>(); + .map(|_| Some(PartitionedStakeReward::new_random())) + .collect::(); let partition_indices = hash_rewards_into_partitions(&stake_rewards, &Hash::default(), 5); let total_num_after_hash_partition: usize = partition_indices.iter().map(|x| x.len()).sum(); @@ -55,7 +55,7 @@ mod tests { #[test] fn test_hash_rewards_into_partitions_empty() { - let stake_rewards = vec![]; + let stake_rewards = PartitionedStakeRewards::default(); let num_partitions = 5; let partition_indices = @@ -79,8 +79,8 @@ mod tests { // simulate 40K - 1 rewards, the expected num of credit blocks should be 10. 
let expected_num = 40959; let stake_rewards = (0..expected_num) - .map(|_| PartitionedStakeReward::new_random()) - .collect::>(); + .map(|_| Some(PartitionedStakeReward::new_random())) + .collect::(); let partition_indices = hash_rewards_into_partitions(&stake_rewards, &Hash::new_from_array([1; 32]), 10); diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index c81ac1c91cb91b..bff6d7760268dc 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -20,7 +20,7 @@ use { solana_reward_info::RewardInfo, solana_stake_interface::state::{Delegation, Stake}, solana_vote::vote_account::VoteAccounts, - std::sync::Arc, + std::{mem::MaybeUninit, sync::Arc}, }; /// Number of blocks for reward calculation and storing vote accounts. @@ -39,14 +39,79 @@ pub(crate) struct PartitionedStakeReward { pub commission: u8, } -type PartitionedStakeRewards = Vec; +/// A vector of stake rewards. +#[derive(Debug, Default, PartialEq)] +pub(crate) struct PartitionedStakeRewards { + /// Inner vector. + rewards: Vec>, + /// Number of stake rewards. + num_rewards: usize, +} + +impl PartitionedStakeRewards { + pub(crate) fn with_capacity(capacity: usize) -> Self { + let rewards = Vec::with_capacity(capacity); + Self { + rewards, + num_rewards: 0, + } + } + + /// Number of stake rewards. + pub(crate) fn num_rewards(&self) -> usize { + self.num_rewards + } + + /// Total length, including both `Some` and `None` elements. 
+ pub(crate) fn total_len(&self) -> usize { + self.rewards.len() + } + + pub(crate) fn get(&self, index: usize) -> Option<&Option> { + self.rewards.get(index) + } + + pub(crate) fn enumerated_rewards_iter( + &self, + ) -> impl Iterator { + self.rewards + .iter() + .enumerate() + .filter_map(|(index, reward)| reward.as_ref().map(|reward| (index, reward))) + } + + fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit>] { + self.rewards.spare_capacity_mut() + } + + unsafe fn assume_init(&mut self, num_stake_rewards: usize) { + self.rewards.set_len(self.rewards.capacity()); + self.num_rewards = num_stake_rewards; + } +} + +#[cfg(test)] +impl FromIterator> for PartitionedStakeRewards { + fn from_iter>>(iter: T) -> Self { + let mut len_some: usize = 0; + let rewards = Vec::from_iter(iter.into_iter().inspect(|reward| { + if reward.is_some() { + len_some = len_some.saturating_add(1); + } + })); + Self { + rewards, + num_rewards: len_some, + } + } +} #[derive(Debug, Clone, PartialEq)] pub(crate) struct StartBlockHeightAndRewards { /// the block height of the slot at which rewards distribution began pub(crate) distribution_starting_block_height: u64, /// calculated epoch rewards before partitioning - pub(crate) all_stake_rewards: Arc>, + pub(crate) all_stake_rewards: Arc, } #[derive(Debug, Clone, PartialEq)] @@ -55,7 +120,7 @@ pub(crate) struct StartBlockHeightAndPartitionedRewards { pub(crate) distribution_starting_block_height: u64, /// calculated epoch rewards pending distribution - pub(crate) all_stake_rewards: Arc>, + pub(crate) all_stake_rewards: Arc, /// indices of calculated epoch rewards per partition, outer Vec is by /// partition (one partition per block), inner Vec is the indices for one @@ -196,7 +261,7 @@ pub(super) struct CalculateRewardsAndDistributeVoteRewardsResult { /// vote accounts pub(super) point_value: PointValue, /// stake rewards that still need to be distributed - pub(super) stake_rewards: Arc>, + pub(super) stake_rewards: Arc, } pub(crate) type 
StakeRewards = Vec; @@ -234,7 +299,7 @@ impl Bank { pub(crate) fn set_epoch_reward_status_calculation( &mut self, distribution_starting_block_height: u64, - stake_rewards: Arc>, + stake_rewards: Arc, ) { self.epoch_reward_status = EpochRewardStatus::Active(EpochRewardPhase::Calculation(StartBlockHeightAndRewards { @@ -246,7 +311,7 @@ impl Bank { pub(crate) fn set_epoch_reward_status_distribution( &mut self, distribution_starting_block_height: u64, - all_stake_rewards: Arc>, + all_stake_rewards: Arc, partition_indices: Vec>, ) { self.epoch_reward_status = EpochRewardStatus::Active(EpochRewardPhase::Distribution( @@ -277,7 +342,7 @@ impl Bank { &self, rewards: &PartitionedStakeRewards, ) -> u64 { - let total_stake_accounts = rewards.len(); + let total_stake_accounts = rewards.num_rewards(); if self.epoch_schedule.warmup && self.epoch < self.first_normal_epoch() { 1 } else { @@ -321,9 +386,8 @@ mod tests { solana_native_token::LAMPORTS_PER_SOL, solana_reward_info::RewardType, solana_signer::Signer, - solana_stake_interface::{error::StakeError, state::StakeStateV2}, + solana_stake_interface::state::StakeStateV2, solana_system_transaction as system_transaction, - solana_transaction::Transaction, solana_vote::vote_transaction, solana_vote_interface::state::{VoteStateVersions, MAX_LOCKOUT_HISTORY}, solana_vote_program::vote_state::{self, TowerSync}, @@ -352,9 +416,9 @@ mod tests { } pub fn build_partitioned_stake_rewards( - stake_rewards: &[PartitionedStakeReward], + stake_rewards: &PartitionedStakeRewards, partition_indices: &[Vec], - ) -> Vec> { + ) -> Vec { partition_indices .iter() .map(|partition_index| { @@ -362,8 +426,8 @@ mod tests { // that belong to this partition partition_index .iter() - .map(|&index| stake_rewards[index].clone()) - .collect::>() + .map(|&index| stake_rewards.get(index).unwrap().clone()) + .collect::() }) .collect::>() } @@ -373,7 +437,7 @@ mod tests { ) -> PartitionedStakeRewards { stake_rewards .into_iter() - .map(|stake_reward| 
PartitionedStakeReward::maybe_from(&stake_reward).unwrap()) + .map(|stake_reward| Some(PartitionedStakeReward::maybe_from(&stake_reward).unwrap())) .collect() } @@ -475,14 +539,12 @@ mod tests { accounts_db_config.partitioned_epoch_rewards_config = PartitionedEpochRewardsConfig::new_for_test(stake_account_stores_per_block); - let bank = Bank::new_with_paths( + let bank = Bank::new_from_genesis( &genesis_config, Arc::new(RuntimeConfig::default()), Vec::new(), None, - None, - false, - Some(accounts_db_config), + accounts_db_config, None, Some(Pubkey::new_unique()), Arc::default(), @@ -501,10 +563,10 @@ mod tests { if let Some(v) = vote_state.as_mut() { vote_state::process_slot_vote_unchecked(v, i as u64) } - let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); + let versioned = VoteStateVersions::V3(Box::new(vote_state.take().unwrap())); vote_state::to(&versioned, &mut vote_account).unwrap(); match versioned { - VoteStateVersions::Current(v) => { + VoteStateVersions::V3(v) => { vote_state = Some(*v); } _ => panic!("Has to be of type Current"), @@ -547,8 +609,8 @@ mod tests { let expected_num = 100; let stake_rewards = (0..expected_num) - .map(|_| PartitionedStakeReward::new_random()) - .collect::>(); + .map(|_| Some(PartitionedStakeReward::new_random())) + .collect::(); let partition_indices = vec![(0..expected_num).collect()]; @@ -576,14 +638,12 @@ mod tests { accounts_db_config.partitioned_epoch_rewards_config = PartitionedEpochRewardsConfig::new_for_test(10); - let bank = Bank::new_with_paths( + let bank = Bank::new_from_genesis( &genesis_config, Arc::new(RuntimeConfig::default()), Vec::new(), None, - None, - false, - Some(accounts_db_config), + accounts_db_config, None, Some(Pubkey::new_unique()), Arc::default(), @@ -599,8 +659,8 @@ mod tests { |num_stakes: u64, expected_num_reward_distribution_blocks: u64| { // Given the short epoch, i.e. 32 slots, we should cap the number of reward distribution blocks to 32/10 = 3. 
let stake_rewards = (0..num_stakes) - .map(|_| PartitionedStakeReward::new_random()) - .collect::>(); + .map(|_| Some(PartitionedStakeReward::new_random())) + .collect::(); assert_eq!( bank.get_reward_distribution_num_blocks(&stake_rewards), @@ -637,8 +697,8 @@ mod tests { // Given 8k rewards, it will take 2 blocks to credit all the rewards let expected_num = 8192; let stake_rewards = (0..expected_num) - .map(|_| PartitionedStakeReward::new_random()) - .collect::>(); + .map(|_| Some(PartitionedStakeReward::new_random())) + .collect::(); assert_eq!(bank.get_reward_distribution_num_blocks(&stake_rewards), 2); } @@ -650,7 +710,33 @@ mod tests { let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); let bank = Bank::new_for_tests(&genesis_config); - let rewards = vec![]; + let rewards = PartitionedStakeRewards::default(); + assert_eq!(bank.get_reward_distribution_num_blocks(&rewards), 1); + } + + /// Test get_reward_distribution_num_blocks with `None` elements in the + /// partitioned stake rewards. 
`None` elements can occur if for any stake + /// delegation: + /// * there is no payout or if any deserved payout is < 1 lamport + /// * corresponding vote account was not found in cache and accounts-db + #[test] + fn test_get_reward_distribution_num_blocks_none() { + let rewards_all = 8192; + let expected_rewards_some = 6144; + let rewards = (0..rewards_all) + .map(|i| { + if i % 4 == 0 { + None + } else { + Some(PartitionedStakeReward::new_random()) + } + }) + .collect::(); + assert_eq!(rewards.rewards.len(), rewards_all); + assert_eq!(rewards.num_rewards(), expected_rewards_some); + + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + let bank = Bank::new_for_tests(&genesis_config); assert_eq!(bank.get_reward_distribution_num_blocks(&rewards), 1); } @@ -858,13 +944,9 @@ mod tests { } } - /// Test that program execution that attempts to mutate a stake account - /// incorrectly should fail during reward period. A credit should succeed, - /// but a withdrawal should fail. + /// Test that lamports can be sent to stake accounts regardless of rewards period. 
#[test] - fn test_program_execution_restricted_for_stake_account_in_reward_period() { - use solana_transaction_error::TransactionError::InstructionError; - + fn test_rewards_period_system_transfer() { let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand(); let validator_keypairs = vec![&validator_vote_keypairs]; let GenesisConfigInfo { @@ -939,36 +1021,6 @@ mod tests { // Credits should always succeed assert!(system_result.is_ok()); - // Attempt to withdraw from new stake account to the mint - let stake_ix = solana_stake_interface::instruction::withdraw( - &new_stake_address, - &new_stake_address, - &mint_keypair.pubkey(), - transfer_amount, - None, - ); - let stake_tx = Transaction::new_signed_with_payer( - &[stake_ix], - Some(&mint_keypair.pubkey()), - &[&mint_keypair, &new_stake_signer], - bank.last_blockhash(), - ); - let stake_result = bank.process_transaction(&stake_tx); - - if slot == num_slots_in_epoch { - // When the bank is at the beginning of the new epoch, i.e. slot - // 32, StakeError::EpochRewardsActive should be thrown for - // actions like StakeInstruction::Withdraw - assert_eq!( - stake_result, - Err(InstructionError(0, StakeError::EpochRewardsActive.into())) - ); - } else { - // When the bank is outside of reward interval, the withdraw - // transaction should not be affected and will succeed. - assert!(stake_result.is_ok()); - } - // Push a dummy blockhash, so that the latest_blockhash() for the transfer transaction in each // iteration are different. Otherwise, all those transactions will be the same, and will not be // executed by the bank except the first one. 
diff --git a/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs index 2c3378f83daf16..aaddeb1826b421 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/sysvar.rs @@ -12,7 +12,7 @@ impl Bank { if let Some(account) = self.get_account(&sysvar::epoch_rewards::id()) { let epoch_rewards: sysvar::epoch_rewards::EpochRewards = from_account(&account).unwrap(); - info!("{prefix} epoch_rewards sysvar: {:?}", epoch_rewards); + info!("{prefix} epoch_rewards sysvar: {epoch_rewards:?}"); } else { info!("{prefix} epoch_rewards sysvar: none"); } diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index 511f7c8ca00264..2670122a944a03 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -33,7 +33,7 @@ mod tests { mem, ops::RangeFull, path::Path, - sync::{atomic::Ordering, Arc}, + sync::{atomic::Ordering, Arc, OnceLock}, }, tempfile::TempDir, test_case::{test_case, test_matrix}, @@ -160,9 +160,8 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) @@ -207,6 +206,7 @@ mod tests { total_stake: 42, node_id_to_vote_accounts: Arc::::default(), epoch_authorized_voters: Arc::::default(), + bls_pubkey_to_rank_map: OnceLock::new(), }, ); assert_eq!(bank.epoch_stakes.len(), 3); @@ -246,9 +246,8 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, - Some(solana_accounts_db::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) @@ -294,7 +293,7 @@ mod tests { .unwrap(); // Deserialize - let (dbank, _) = snapshot_bank_utils::bank_from_snapshot_archives( + let dbank = snapshot_bank_utils::bank_from_snapshot_archives( &[accounts_dir], bank_snapshots_dir.path(), &snapshot_archive_info, @@ -303,11 +302,10 @@ mod tests { 
&RuntimeConfig::default(), None, None, - None, false, false, false, - Some(solana_accounts_db::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) @@ -352,7 +350,7 @@ mod tests { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "CCsPFzrwkgWmiYh25nwNvBSRdgiUW45Rh6jGh1XGcNZt") + frozen_abi(digest = "HxmFy4D1VFmq91rp4PDAMsunMnwxqdeQ2CNyaNkStnEw") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 56d725c2ac930a..81149df6acdc45 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -46,6 +46,7 @@ use { BankId, Epoch, Slot, UnixTimestamp, DEFAULT_TICKS_PER_SLOT, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, MAX_RECENT_BLOCKHASHES, }, + solana_cluster_type::ClusterType, solana_compute_budget::{ compute_budget::ComputeBudget, compute_budget_limits::ComputeBudgetLimits, }, @@ -58,7 +59,7 @@ use { solana_feature_gate_interface::{self as feature, Feature}, solana_fee_calculator::FeeRateGovernor, solana_fee_structure::FeeStructure, - solana_genesis_config::{ClusterType, GenesisConfig}, + solana_genesis_config::GenesisConfig, solana_hash::Hash, solana_instruction::{error::InstructionError, AccountMeta, Instruction}, solana_keypair::{keypair_from_seed, Keypair}, @@ -70,7 +71,7 @@ use { solana_message::{ compiled_instruction::CompiledInstruction, Message, MessageHeader, SanitizedMessage, }, - solana_native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, + solana_native_token::LAMPORTS_PER_SOL, solana_nonce::{self as nonce, state::DurableNonce}, solana_packet::PACKET_DATA_SIZE, solana_poh_config::PohConfig, @@ -83,7 +84,8 @@ use { solana_rent::Rent, solana_reward_info::RewardType, solana_sdk_ids::{ - bpf_loader, bpf_loader_upgradeable, incinerator, native_loader, secp256k1_program, + bpf_loader, bpf_loader_upgradeable, ed25519_program, incinerator, native_loader, + secp256k1_program, }, solana_sha256_hasher::hash, 
solana_signature::Signature, @@ -99,6 +101,7 @@ use { transaction_commit_result::TransactionCommitResultExtensions, transaction_execution_result::ExecutedTransaction, }, + solana_svm_timings::ExecuteTimings, solana_svm_transaction::svm_message::SVMMessage, solana_system_interface::{ error::SystemError, @@ -107,7 +110,6 @@ use { MAX_PERMITTED_DATA_LENGTH, }, solana_system_transaction as system_transaction, solana_sysvar as sysvar, - solana_timings::ExecuteTimings, solana_transaction::{ sanitized::SanitizedTransaction, Transaction, TransactionVerificationMode, }, @@ -116,13 +118,13 @@ use { solana_vote_program::{ vote_instruction, vote_state::{ - self, create_account_with_authorized, BlockTimestamp, VoteInit, VoteState, - VoteStateVersions, MAX_LOCKOUT_HISTORY, + self, create_account_with_authorized, BlockTimestamp, VoteAuthorize, VoteInit, + VoteStateV3, VoteStateVersions, MAX_LOCKOUT_HISTORY, }, }, spl_generic_token::token, std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, convert::TryInto, fs::File, io::Read, @@ -628,7 +630,7 @@ impl Bank { // in practice. 
let account = self.get_account_with_fixed_root(vote_pubkey)?; if account.owner() == &solana_vote_program - && VoteState::deserialize(account.data()).is_ok() + && VoteStateV3::deserialize(account.data()).is_ok() { vote_accounts_cache_miss_count.fetch_add(1, Relaxed); } @@ -730,11 +732,11 @@ where if let Some(v) = vote_state.as_mut() { vote_state::process_slot_vote_unchecked(v, i as u64) } - let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); + let versioned = VoteStateVersions::V3(Box::new(vote_state.take().unwrap())); vote_state::to(&versioned, &mut vote_account).unwrap(); bank0.store_account_and_update_capitalization(&vote_id, &vote_account); match versioned { - VoteStateVersions::Current(v) => { + VoteStateVersions::V3(v) => { vote_state = Some(*v); } _ => panic!("Has to be of type Current"), @@ -880,11 +882,11 @@ fn do_test_bank_update_rewards_determinism() -> u64 { if let Some(v) = vote_state.as_mut() { vote_state::process_slot_vote_unchecked(v, i as u64) } - let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); + let versioned = VoteStateVersions::V3(Box::new(vote_state.take().unwrap())); vote_state::to(&versioned, &mut vote_account).unwrap(); bank.store_account_and_update_capitalization(&vote_id, &vote_account); match versioned { - VoteStateVersions::Current(v) => { + VoteStateVersions::V3(v) => { vote_state = Some(*v); } _ => panic!("Has to be of type Current"), @@ -946,7 +948,6 @@ impl VerifyAccountsHashConfig { fn default_for_test() -> Self { Self { require_rooted_bank: false, - run_in_background: false, } } } @@ -958,7 +959,7 @@ fn test_purge_empty_accounts() { // so we have to stop at various points and restart to actively test. 
for pass in 0..3 { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(LAMPORTS_PER_SOL); let amount = genesis_config.rent.minimum_balance(0); let (mut bank, bank_forks) = Bank::new_for_tests(&genesis_config).wrap_with_bank_forks_for_tests(); @@ -1020,7 +1021,7 @@ fn test_purge_empty_accounts() { if pass == 0 { add_root_and_flush_write_cache(&bank0); - assert!(bank0.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank0.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); continue; } @@ -1029,14 +1030,14 @@ fn test_purge_empty_accounts() { bank0.squash(); add_root_and_flush_write_cache(&bank0); if pass == 1 { - assert!(bank0.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank0.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); continue; } bank1.freeze(); bank1.squash(); add_root_and_flush_write_cache(&bank1); - assert!(bank1.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank1.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); // keypair should have 0 tokens on both forks assert_eq!(bank0.get_account(&keypair.pubkey()), None); @@ -1044,13 +1045,13 @@ fn test_purge_empty_accounts() { bank1.clean_accounts_for_tests(); - assert!(bank1.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank1.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); } } #[test] fn test_two_payments_to_one_party() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let pubkey = solana_pubkey::new_rand(); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); 
@@ -1067,7 +1068,7 @@ fn test_two_payments_to_one_party() { #[test] fn test_one_source_two_tx_one_batch() { - let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(LAMPORTS_PER_SOL); let key1 = solana_pubkey::new_rand(); let key2 = solana_pubkey::new_rand(); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); @@ -1084,7 +1085,7 @@ fn test_one_source_two_tx_one_batch() { assert_eq!(res[1], Err(TransactionError::AccountInUse)); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), - sol_to_lamports(1.) - amount + LAMPORTS_PER_SOL - amount ); assert_eq!(bank.get_balance(&key1), amount); assert_eq!(bank.get_balance(&key2), 0); @@ -1096,7 +1097,7 @@ fn test_one_source_two_tx_one_batch() { #[test] fn test_one_tx_two_out_atomic_fail() { - let amount = sol_to_lamports(1.); + let amount = LAMPORTS_PER_SOL; let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(amount); let key1 = solana_pubkey::new_rand(); let key2 = solana_pubkey::new_rand(); @@ -1118,7 +1119,7 @@ fn test_one_tx_two_out_atomic_fail() { #[test] fn test_one_tx_two_out_atomic_pass() { - let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(LAMPORTS_PER_SOL); let key1 = solana_pubkey::new_rand(); let key2 = solana_pubkey::new_rand(); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); @@ -1132,7 +1133,7 @@ fn test_one_tx_two_out_atomic_pass() { bank.process_transaction(&tx).unwrap(); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), - sol_to_lamports(1.) 
- (2 * amount) + LAMPORTS_PER_SOL - (2 * amount) ); assert_eq!(bank.get_balance(&key1), amount); assert_eq!(bank.get_balance(&key2), amount); @@ -1188,7 +1189,7 @@ fn test_account_not_found() { #[test] fn test_insufficient_funds() { - let mint_amount = sol_to_lamports(1.); + let mint_amount = LAMPORTS_PER_SOL; let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(mint_amount); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let pubkey = solana_pubkey::new_rand(); @@ -1216,7 +1217,7 @@ fn test_insufficient_funds() { #[test] fn test_executed_transaction_count_post_bank_transaction_count_fix() { - let mint_amount = sol_to_lamports(1.); + let mint_amount = LAMPORTS_PER_SOL; let (genesis_config, mint_keypair) = create_genesis_config(mint_amount); let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let pubkey = solana_pubkey::new_rand(); @@ -1260,7 +1261,7 @@ fn test_executed_transaction_count_post_bank_transaction_count_fix() { #[test] fn test_transfer_to_newb() { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); let pubkey = solana_pubkey::new_rand(); @@ -1271,7 +1272,7 @@ fn test_transfer_to_newb() { #[test] fn test_transfer_to_sysvar() { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); @@ -1696,19 +1697,19 @@ fn test_bank_blockhash_compute_unit_fee_structure() { #[test] fn test_debits_before_credits() { let (genesis_config, mint_keypair) = - 
create_genesis_config_no_tx_fee_no_rent(sol_to_lamports(2.)); + create_genesis_config_no_tx_fee_no_rent(2 * LAMPORTS_PER_SOL); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let keypair = Keypair::new(); let tx0 = system_transaction::transfer( &keypair, &mint_keypair.pubkey(), - sol_to_lamports(1.), + LAMPORTS_PER_SOL, genesis_config.hash(), ); let tx1 = system_transaction::transfer( &mint_keypair, &keypair.pubkey(), - sol_to_lamports(2.), + 2 * LAMPORTS_PER_SOL, genesis_config.hash(), ); let txs = vec![tx0, tx1]; @@ -1828,7 +1829,7 @@ fn test_readonly_accounts(relax_intrabatch_account_locks: bool) { #[test] fn test_interleaving_locks() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let alice = Keypair::new(); let bob = Keypair::new(); @@ -1951,6 +1952,150 @@ fn test_load_and_execute_commit_transactions_fees_only() { loaded_accounts_count: 2, loaded_accounts_data_size: nonce_size as u32, }, + fee_payer_post_balance: genesis_config.rent.minimum_balance(0) - 1 - 5000, + })] + ); +} + +#[test] +fn test_load_and_execute_commit_transactions_failure() { + let GenesisConfigInfo { + mut genesis_config, .. 
+ } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); + genesis_config.rent = Rent::default(); + genesis_config.fee_rate_governor = FeeRateGovernor::new(5000, 0); + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = Bank::new_from_parent( + bank, + &Pubkey::new_unique(), + genesis_config.epoch_schedule.get_first_slot_in_epoch(1), + ); + + let fee_payer = Pubkey::new_unique(); + let starting_balance = 2 * genesis_config.rent.minimum_balance(0) + 10_000; + bank.store_account( + &fee_payer, + &AccountSharedData::new(starting_balance, 0, &system_program::id()), + ); + + let recipient = Pubkey::new_unique(); + let transfer_amount = genesis_config.rent.minimum_balance(0); + + // Invoke transaction with valid system-program instruction followed by a + // failing instruction to trigger a failed execution. + // The system transfer is used to modify the loaded account state to verify the + // fee payer post balance is correct. + let transaction = Transaction::new_unsigned(Message::new_with_blockhash( + &[ + system_instruction::transfer(&fee_payer, &recipient, transfer_amount), + Instruction::new_with_bincode(system_program::id(), &(), vec![]), + ], + Some(&fee_payer), + &bank.last_blockhash(), + )); + + let batch = bank.prepare_batch_for_tests(vec![transaction]); + let commit_results = bank + .load_execute_and_commit_transactions( + &batch, + MAX_PROCESSING_AGE, + ExecutionRecordingConfig::new_single_setting(true), + &mut ExecuteTimings::default(), + None, + ) + .0; + + assert_eq!( + commit_results, + vec![Ok(CommittedTransaction { + status: Err(TransactionError::InstructionError( + 1, + InstructionError::InvalidInstructionData + )), + log_messages: Some(vec![ + "Program 11111111111111111111111111111111 invoke [1]".to_string(), + "Program 11111111111111111111111111111111 success".to_string(), + "Program 11111111111111111111111111111111 invoke [1]".to_string(), + "Program 11111111111111111111111111111111 failed: invalid 
instruction data" + .to_string() + ]), + inner_instructions: Some(vec![vec![], vec![]]), + return_data: None, + executed_units: 300, + fee_details: FeeDetails::new(5000, 0), + loaded_account_stats: TransactionLoadedAccountsStats { + loaded_accounts_count: 3, + loaded_accounts_data_size: 142, // size of system account (initially recipient does not exist) + }, + fee_payer_post_balance: starting_balance - 5000, + })] + ); +} + +#[test] +fn test_load_and_execute_commit_transactions_success() { + let GenesisConfigInfo { + mut genesis_config, .. + } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); + genesis_config.rent = Rent::default(); + genesis_config.fee_rate_governor = FeeRateGovernor::new(5000, 0); + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = Bank::new_from_parent( + bank, + &Pubkey::new_unique(), + genesis_config.epoch_schedule.get_first_slot_in_epoch(1), + ); + + let fee_payer = Pubkey::new_unique(); + let starting_balance = 2 * genesis_config.rent.minimum_balance(0) + 10_000; + bank.store_account( + &fee_payer, + &AccountSharedData::new(starting_balance, 0, &system_program::id()), + ); + + let recipient = Pubkey::new_unique(); + let transfer_amount = genesis_config.rent.minimum_balance(0); + + // Invoke transaction with valid system-program instruction to trigger + // a successful execution + let transaction = Transaction::new_unsigned(Message::new_with_blockhash( + &[system_instruction::transfer( + &fee_payer, + &recipient, + transfer_amount, + )], + Some(&fee_payer), + &bank.last_blockhash(), + )); + + let batch = bank.prepare_batch_for_tests(vec![transaction]); + let commit_results = bank + .load_execute_and_commit_transactions( + &batch, + MAX_PROCESSING_AGE, + ExecutionRecordingConfig::new_single_setting(true), + &mut ExecuteTimings::default(), + None, + ) + .0; + + assert_eq!( + commit_results, + vec![Ok(CommittedTransaction { + status: Ok(()), + log_messages: Some(vec![ + "Program 
11111111111111111111111111111111 invoke [1]".to_string(), + "Program 11111111111111111111111111111111 success".to_string(), + ]), + inner_instructions: Some(vec![vec![]]), + return_data: None, + executed_units: 150, + fee_details: FeeDetails::new(5000, 0), + loaded_account_stats: TransactionLoadedAccountsStats { + loaded_accounts_count: 3, + loaded_accounts_data_size: 142, // size of system account (initially recipient does not exist) + }, + fee_payer_post_balance: starting_balance - 5000 - transfer_amount, })] ); } @@ -2042,7 +2187,7 @@ fn test_bank_invalid_account_index() { #[test] fn test_bank_pay_to_self() { - let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(LAMPORTS_PER_SOL); let key1 = Keypair::new(); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); @@ -2083,7 +2228,7 @@ fn test_bank_parents() { /// Verifies that transactions are dropped if they have already been processed #[test] fn test_tx_already_processed() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let key1 = Keypair::new(); @@ -2117,7 +2262,7 @@ fn test_tx_already_processed() { /// Verifies that last ids and status cache are correctly referenced from parent #[test] fn test_bank_parent_already_processed() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let key1 = Keypair::new(); let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); @@ -2135,7 +2280,7 @@ fn test_bank_parent_already_processed() { /// Verifies that last 
ids and accounts are correctly referenced from parent #[test] fn test_bank_parent_account_spend() { - let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(LAMPORTS_PER_SOL); let key1 = Keypair::new(); let key2 = Keypair::new(); let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); @@ -2152,8 +2297,7 @@ fn test_bank_parent_account_spend() { #[test] fn test_bank_hash_internal_state() { - let (genesis_config, mint_keypair) = - create_genesis_config_no_tx_fee_no_rent(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(LAMPORTS_PER_SOL); let (bank0, _bank_forks0) = Bank::new_with_bank_forks_for_tests(&genesis_config); let (bank1, bank_forks1) = Bank::new_with_bank_forks_for_tests(&genesis_config); @@ -2180,14 +2324,14 @@ fn test_bank_hash_internal_state() { bank2.transfer(amount, &mint_keypair, &pubkey2).unwrap(); bank2.squash(); bank2.force_flush_accounts_cache(); - assert!(bank2.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank2.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); } #[test] fn test_bank_hash_internal_state_verify() { for pass in 0..4 { let (genesis_config, mint_keypair) = - create_genesis_config_no_tx_fee_no_rent(sol_to_lamports(1.)); + create_genesis_config_no_tx_fee_no_rent(LAMPORTS_PER_SOL); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); @@ -2214,7 +2358,7 @@ fn test_bank_hash_internal_state_verify() { // we later modify bank 2, so this flush is destructive to the test bank2.freeze(); add_root_and_flush_write_cache(&bank2); - assert!(bank2.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank2.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); } let bank3 = 
new_bank_from_parent_with_bank_forks( &bank_forks, @@ -2225,7 +2369,7 @@ fn test_bank_hash_internal_state_verify() { assert_eq!(bank0_state, bank0.hash_internal_state()); if pass == 0 { // this relies on us having set bank2's accounts hash in the pass==0 if above - assert!(bank2.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank2.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); continue; } if pass == 1 { @@ -2234,7 +2378,7 @@ fn test_bank_hash_internal_state_verify() { // Doing so throws an assert. So, we can't flush 3 until 2 is flushed. bank3.freeze(); add_root_and_flush_write_cache(&bank3); - assert!(bank3.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank3.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); continue; } @@ -2243,7 +2387,7 @@ fn test_bank_hash_internal_state_verify() { bank2.freeze(); // <-- keep freeze() *outside* `if pass == 2 {}` if pass == 2 { add_root_and_flush_write_cache(&bank2); - assert!(bank2.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank2.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); // Verifying the accounts lt hash is only intended to be called at startup, and // normally in the background. 
Since here we're *not* at startup, and doing it @@ -2258,7 +2402,7 @@ fn test_bank_hash_internal_state_verify() { bank3.freeze(); add_root_and_flush_write_cache(&bank3); - assert!(bank3.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + assert!(bank3.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); } } @@ -2273,7 +2417,7 @@ fn test_verify_hash_unfrozen() { fn test_verify_snapshot_bank() { solana_logger::setup(); let pubkey = solana_pubkey::new_rand(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank.transfer( genesis_config.rent.minimum_balance(0), @@ -2294,7 +2438,7 @@ fn test_verify_snapshot_bank() { #[test] fn test_bank_hash_same_transactions_different_fork() { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0.freeze(); @@ -2339,7 +2483,7 @@ fn test_hash_internal_state_genesis() { // of hash_internal_state #[test] fn test_hash_internal_state_order() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let amount = genesis_config.rent.minimum_balance(0); let (bank0, _bank_forks0) = Bank::new_with_bank_forks_for_tests(&genesis_config); let (bank1, _bank_forks1) = Bank::new_with_bank_forks_for_tests(&genesis_config); @@ -2358,7 +2502,7 @@ fn test_hash_internal_state_order() { #[test] fn test_hash_internal_state_error() { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = 
create_genesis_config(LAMPORTS_PER_SOL); let amount = genesis_config.rent.minimum_balance(0); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let key0 = solana_pubkey::new_rand(); @@ -2367,7 +2511,7 @@ fn test_hash_internal_state_error() { // Transfer will error but still take a fee assert!(bank - .transfer(sol_to_lamports(1.), &mint_keypair, &key0) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key0) .is_err()); assert_ne!(orig, bank.hash_internal_state()); @@ -2399,7 +2543,7 @@ fn test_bank_hash_internal_state_squash() { #[test] fn test_bank_squash() { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(2.)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(2 * LAMPORTS_PER_SOL); let key1 = Keypair::new(); let key2 = Keypair::new(); let (parent, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); @@ -2470,7 +2614,7 @@ fn test_bank_squash() { #[test] fn test_bank_get_account_in_parent_after_squash() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (parent, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); @@ -2488,7 +2632,7 @@ fn test_bank_get_account_in_parent_after_squash() { #[test] fn test_bank_get_account_in_parent_after_squash2() { solana_logger::setup(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); @@ -2571,7 +2715,7 @@ fn test_bank_get_account_in_parent_after_squash2() { fn test_bank_get_account_modified_since_parent_with_fixed_root() { let pubkey = solana_pubkey::new_rand(); - let 
(genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let amount = genesis_config.rent.minimum_balance(0); let (bank1, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank1.transfer(amount, &mint_keypair, &pubkey).unwrap(); @@ -2907,7 +3051,7 @@ fn test_bank_get_slots_in_epoch() { #[test] fn test_is_delta_true() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let key1 = Keypair::new(); let tx_transfer_mint_to_1 = system_transaction::transfer( @@ -2931,7 +3075,7 @@ fn test_is_delta_true() { #[test] fn test_is_empty() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank0, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let key1 = Keypair::new(); @@ -2951,7 +3095,7 @@ fn test_is_empty() { #[test] fn test_bank_inherit_tx_count() { - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.0)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Bank 1 @@ -3055,7 +3199,7 @@ fn test_bank_vote_accounts() { }, 10, vote_instruction::CreateVoteAccountConfig { - space: VoteStateVersions::vote_state_size_of(true) as u64, + space: VoteStateV3::size_of() as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, ); @@ -3094,7 +3238,18 @@ fn test_bank_cloned_stake_delegations() { 123_000_000_000, ); genesis_config.rent = Rent::default(); + + for (pubkey, account) in + solana_program_binaries::by_id(&solana_stake_program::id(), &genesis_config.rent) + .unwrap() + .into_iter() + { + 
genesis_config.add_account(pubkey, account); + } + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + bank.squash(); + let bank = Bank::new_from_parent(bank, &Pubkey::new_unique(), 1); let stake_delegations = bank.stakes_cache.stakes().stake_delegations().clone(); assert_eq!(stake_delegations.len(), 1); // bootstrap validator has @@ -3102,7 +3257,7 @@ fn test_bank_cloned_stake_delegations() { let (vote_balance, stake_balance) = { let rent = &bank.rent_collector().rent; - let vote_rent_exempt_reserve = rent.minimum_balance(VoteState::size_of()); + let vote_rent_exempt_reserve = rent.minimum_balance(VoteStateV3::size_of()); let stake_rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); let minimum_delegation = solana_stake_program::get_minimum_delegation( bank.feature_set @@ -3126,7 +3281,7 @@ fn test_bank_cloned_stake_delegations() { }, vote_balance, vote_instruction::CreateVoteAccountConfig { - space: VoteStateVersions::vote_state_size_of(true) as u64, + space: VoteStateV3::size_of() as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, ); @@ -3409,7 +3564,7 @@ fn test_add_builtin() { declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - let program_id = instruction_context.get_last_program_key(transaction_context)?; + let program_id = instruction_context.get_program_key()?; if mock_vote_program_id() != *program_id { return Err(InstructionError::IncorrectProgramId); } @@ -3431,7 +3586,7 @@ fn test_add_builtin() { }, 1, vote_instruction::CreateVoteAccountConfig { - space: VoteStateVersions::vote_state_size_of(true) as u64, + space: VoteStateV3::size_of() as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, ); @@ -3478,7 +3633,7 @@ fn test_add_duplicate_static_program() { }, 1, vote_instruction::CreateVoteAccountConfig { - space: 
VoteStateVersions::vote_state_size_of(true) as u64, + space: VoteStateV3::size_of() as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, ); @@ -3556,14 +3711,12 @@ fn test_add_instruction_processor_for_existing_unrelated_accounts() { continue; } - bank.transaction_processor.add_builtin( - &bank, + bank.add_builtin( vote_id, "mock_program1", ProgramCacheEntry::new_builtin(0, 0, MockBuiltin::vm), ); - bank.transaction_processor.add_builtin( - &bank, + bank.add_builtin( stake_id, "mock_program2", ProgramCacheEntry::new_builtin(0, 0, MockBuiltin::vm), @@ -3705,7 +3858,8 @@ fn test_banks_leak() { let pid = std::process::id(); #[cfg(not(target_os = "linux"))] error!( - "\nYou can run this to watch RAM:\n while read -p 'banks: '; do echo $(( $(ps -o vsize= -p {})/$REPLY));done", pid + "\nYou can run this to watch RAM:\n while read -p 'banks: '; do echo $(( $(ps -o vsize= \ + -p {pid})/$REPLY));done" ); loop { num_banks += 1; @@ -3729,7 +3883,7 @@ fn test_banks_leak() { } #[cfg(not(target_os = "linux"))] { - error!("{} banks, sleeping for 5 sec", num_banks); + error!("{num_banks} banks, sleeping for 5 sec"); std::thread::sleep(Duration::from_secs(5)); } } @@ -4178,11 +4332,11 @@ fn test_nonce_authority() { let bad_nonce_authority = bad_nonce_authority_keypair.pubkey(); let custodian_account = bank.get_account(&custodian_pubkey).unwrap(); - debug!("alice: {}", alice_pubkey); - debug!("custodian: {}", custodian_pubkey); - debug!("nonce: {}", nonce_pubkey); + debug!("alice: {alice_pubkey}"); + debug!("custodian: {custodian_pubkey}"); + debug!("nonce: {nonce_pubkey}"); debug!("nonce account: {:?}", bank.get_account(&nonce_pubkey)); - debug!("cust: {:?}", custodian_account); + debug!("cust: {custodian_account:?}"); let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); for _ in 0..MAX_RECENT_BLOCKHASHES + 1 { @@ -4199,7 +4353,7 @@ fn test_nonce_authority() { &[&custodian_keypair, &bad_nonce_authority_keypair], nonce_hash, ); - debug!("{:?}", 
nonce_tx); + debug!("{nonce_tx:?}"); let initial_custodian_balance = custodian_account.lamports(); assert_eq!( bank.process_transaction(&nonce_tx), @@ -4237,9 +4391,9 @@ fn test_nonce_payer() { let custodian_pubkey = custodian_keypair.pubkey(); let nonce_pubkey = nonce_keypair.pubkey(); - debug!("alice: {}", alice_pubkey); - debug!("custodian: {}", custodian_pubkey); - debug!("nonce: {}", nonce_pubkey); + debug!("alice: {alice_pubkey}"); + debug!("custodian: {custodian_pubkey}"); + debug!("nonce: {nonce_pubkey}"); debug!("nonce account: {:?}", bank.get_account(&nonce_pubkey)); debug!("cust: {:?}", bank.get_account(&custodian_pubkey)); let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); @@ -4258,7 +4412,7 @@ fn test_nonce_payer() { &[&custodian_keypair, &nonce_keypair], nonce_hash, ); - debug!("{:?}", nonce_tx); + debug!("{nonce_tx:?}"); assert_eq!( bank.process_transaction(&nonce_tx), Err(TransactionError::InstructionError( @@ -4303,9 +4457,9 @@ fn test_nonce_payer_tx_wide_cap() { let custodian_pubkey = custodian_keypair.pubkey(); let nonce_pubkey = nonce_keypair.pubkey(); - debug!("alice: {}", alice_pubkey); - debug!("custodian: {}", custodian_pubkey); - debug!("nonce: {}", nonce_pubkey); + debug!("alice: {alice_pubkey}"); + debug!("custodian: {custodian_pubkey}"); + debug!("nonce: {nonce_pubkey}"); debug!("nonce account: {:?}", bank.get_account(&nonce_pubkey)); debug!("cust: {:?}", bank.get_account(&custodian_pubkey)); let nonce_hash = get_nonce_blockhash(&bank, &nonce_pubkey).unwrap(); @@ -4324,7 +4478,7 @@ fn test_nonce_payer_tx_wide_cap() { &[&custodian_keypair, &nonce_keypair], nonce_hash, ); - debug!("{:?}", nonce_tx); + debug!("{nonce_tx:?}"); assert_eq!( bank.process_transaction(&nonce_tx), @@ -4682,16 +4836,16 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { let instruction_data = instruction_context.get_instruction_data(); let lamports = u64::from_le_bytes(instruction_data.try_into().unwrap()); instruction_context - 
.try_borrow_instruction_account(transaction_context, 2)? + .try_borrow_instruction_account(2)? .checked_sub_lamports(lamports)?; instruction_context - .try_borrow_instruction_account(transaction_context, 1)? + .try_borrow_instruction_account(1)? .checked_add_lamports(lamports)?; instruction_context - .try_borrow_instruction_account(transaction_context, 0)? + .try_borrow_instruction_account(0)? .checked_sub_lamports(lamports)?; instruction_context - .try_borrow_instruction_account(transaction_context, 1)? + .try_borrow_instruction_account(1)? .checked_add_lamports(lamports)?; Ok(()) }); @@ -4699,7 +4853,7 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { let from_pubkey = solana_pubkey::new_rand(); let to_pubkey = solana_pubkey::new_rand(); let dup_pubkey = from_pubkey; - let from_account = AccountSharedData::new(sol_to_lamports(100.), 1, &mock_program_id); + let from_account = AccountSharedData::new(100 * LAMPORTS_PER_SOL, 1, &mock_program_id); let to_account = AccountSharedData::new(0, 1, &mock_program_id); bank.store_account(&from_pubkey, &from_account); bank.store_account(&to_pubkey, &to_account); @@ -4710,7 +4864,7 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { AccountMeta::new(dup_pubkey, false), ]; let instruction = - Instruction::new_with_bincode(mock_program_id, &sol_to_lamports(10.), account_metas); + Instruction::new_with_bincode(mock_program_id, &(10 * LAMPORTS_PER_SOL), account_metas); let tx = Transaction::new_signed_with_payer( &[instruction], Some(&mint_keypair.pubkey()), @@ -4720,8 +4874,8 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { let result = bank.process_transaction(&tx); assert_eq!(result, Ok(())); - assert_eq!(bank.get_balance(&from_pubkey), sol_to_lamports(80.)); - assert_eq!(bank.get_balance(&to_pubkey), sol_to_lamports(20.)); + assert_eq!(bank.get_balance(&from_pubkey), 80 * LAMPORTS_PER_SOL); + assert_eq!(bank.get_balance(&to_pubkey), 20 * LAMPORTS_PER_SOL); } #[test] @@ -4794,7 
+4948,7 @@ fn test_account_ids_after_program_ids() { let result = bank.process_transaction(&tx); assert_eq!(result, Ok(())); let account = bank.get_account(&solana_vote_program::id()).unwrap(); - info!("account: {:?}", account); + info!("account: {account:?}"); assert!(account.executable()); } @@ -4988,8 +5142,7 @@ fn test_fuzz_instructions() { .map(|i| { let key = solana_pubkey::new_rand(); let name = format!("program{i:?}"); - bank.transaction_processor.add_builtin( - &bank, + bank.add_builtin( key, name.as_str(), ProgramCacheEntry::new_builtin(0, 0, MockBuiltin::vm), @@ -5129,11 +5282,11 @@ fn test_fuzz_instructions() { assert!(account.executable()); assert_eq!(account.data(), name); } - info!("result: {:?}", result); + info!("result: {result:?}"); let result_key = format!("{result:?}"); *results.entry(result_key).or_insert(0) += 1; } - info!("results: {:?}", results); + info!("results: {results:?}"); } // DEVELOPERS: This test is intended to ensure that the bank hash remains @@ -5143,24 +5296,27 @@ fn test_fuzz_instructions() { // added feature. 
#[test] fn test_bank_hash_consistency() { - let account = AccountSharedData::new(1_000_000_000_000, 0, &system_program::id()); - let mut genesis_config = GenesisConfig::new(&[(Pubkey::from([42; 32]), account)], &[]); - // Override the creation time to ensure bank hash consistency - genesis_config.creation_time = 0; - genesis_config.cluster_type = ClusterType::MainnetBeta; + let genesis_config = GenesisConfig { + // Override the creation time to ensure bank hash consistency + creation_time: 0, + accounts: BTreeMap::from([( + Pubkey::from([42; 32]), + Account::new(1_000_000_000_000, 0, &system_program::id()), + )]), + cluster_type: ClusterType::MainnetBeta, + ..GenesisConfig::default() + }; // Set the feature set to all enabled so that we detect any inconsistencies // in the hash computation that may arise from feature set changes let feature_set = FeatureSet::all_enabled(); - let mut bank = Arc::new(Bank::new_with_paths( + let mut bank = Arc::new(Bank::new_from_genesis( &genesis_config, Arc::new(RuntimeConfig::default()), vec![], None, - None, - false, - Some(BankTestConfig::default().accounts_db_config), + BankTestConfig::default().accounts_db_config, None, Some(Pubkey::from([42; 32])), Arc::default(), @@ -5173,7 +5329,7 @@ fn test_bank_hash_consistency() { assert_eq!(bank.epoch(), 0); assert_eq!( bank.hash().to_string(), - "AyXhbqmPsC46x7MHAuW89pQcNZVrUZnAND6ABWJ24svx", + "EzyLJJki4ALhQAq5wbmiNctDhytQckGJRXnk9APKXv7r", ); } @@ -5181,14 +5337,14 @@ fn test_bank_hash_consistency() { assert_eq!(bank.epoch(), 1); assert_eq!( bank.hash().to_string(), - "ApbSYzbXgNBobjzp8ytimvVsMBUxtuJR9nFieePdpwj3" + "6h1KzSuTW6MwkgjtEbrv6AyUZ2NHtSxCQi8epjHDFYh8" ); } if bank.slot == 128 { assert_eq!(bank.epoch(), 2); assert_eq!( bank.hash().to_string(), - "FxaFn1Dj7fetY1SXWWi6DyEYidoiDLZexe3hM1tNvkwJ" + "4GX3883TVK7SQfbPUHem4HXcqdHU2DZVAB6yEXspn2qe" ); break; } @@ -5202,9 +5358,10 @@ fn test_same_program_id_uses_unique_executable_accounts() { declare_process_instruction!(MockBuiltin, 
1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; - instruction_context - .try_borrow_program_account(transaction_context, 0)? - .set_data_length(2) + let program_idx = instruction_context.get_index_of_program_account_in_transaction()?; + let mut acc = transaction_context.accounts().try_borrow_mut(program_idx)?; + acc.set_data_from_slice(&[1, 2]); + Ok(()) }); let (genesis_config, mint_keypair) = create_genesis_config(50000); @@ -5268,8 +5425,8 @@ fn test_clean_nonrooted() { let pubkey0 = Pubkey::from([0; 32]); let pubkey1 = Pubkey::from([1; 32]); - info!("pubkey0: {}", pubkey0); - info!("pubkey1: {}", pubkey1); + info!("pubkey0: {pubkey0}"); + info!("pubkey1: {pubkey1}"); // Set root for bank 0, with caching enabled let bank0 = Arc::new(Bank::new_with_config_for_tests( @@ -5321,10 +5478,7 @@ fn test_clean_nonrooted() { bank3.force_flush_accounts_cache(); bank3.clean_accounts_for_tests(); - assert_eq!( - bank3.rc.accounts.accounts_db.ref_count_for_pubkey(&pubkey0), - 2 - ); + bank3.rc.accounts.accounts_db.assert_ref_count(&pubkey0, 2); assert!(bank3 .rc .accounts @@ -5405,7 +5559,7 @@ fn test_shrink_candidate_slots_cached() { // No more slots should be shrunk assert_eq!(bank2.shrink_candidate_slots(), 0); // alive_counts represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(alive_counts, vec![13, 1, 6]); + assert_eq!(alive_counts, vec![12, 1, 6]); } #[test] @@ -5610,9 +5764,9 @@ fn test_add_builtin_account_squatted_while_not_replacing() { #[test] #[should_panic( - expected = "Can't change frozen bank by adding not-existing new builtin \ - program (mock_program, CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre). \ - Maybe, inconsistent program activation is detected on snapshot restore?" + expected = "Can't change frozen bank by adding not-existing new builtin program \ + (mock_program, CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre). 
Maybe, inconsistent \ + program activation is detected on snapshot restore?" )] fn test_add_builtin_account_after_frozen() { let slot = 123; @@ -5759,9 +5913,9 @@ fn test_add_precompiled_account_squatted_while_not_replacing() { #[test] #[should_panic( - expected = "Can't change frozen bank by adding not-existing new precompiled \ - program (CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre). \ - Maybe, inconsistent program activation is detected on snapshot restore?" + expected = "Can't change frozen bank by adding not-existing new precompiled program \ + (CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre). Maybe, inconsistent program \ + activation is detected on snapshot restore?" )] fn test_add_precompiled_account_after_frozen() { let slot = 123; @@ -5842,7 +5996,11 @@ fn test_bank_load_program() { assert!(bank.process_transaction(&transaction).is_ok()); { - let program_cache = bank.transaction_processor.program_cache.read().unwrap(); + let program_cache = bank + .transaction_processor + .global_program_cache + .read() + .unwrap(); let [program] = program_cache.get_slot_versions_for_tests(&program_key) else { panic!(); }; @@ -5892,7 +6050,11 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len(formalize_loaded_transaction_ ); { // Make sure it is not in the cache because the account owner is not a loader - let program_cache = bank.transaction_processor.program_cache.read().unwrap(); + let program_cache = bank + .transaction_processor + .global_program_cache + .read() + .unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert!(slot_versions.is_empty()); } @@ -5968,7 +6130,11 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len(formalize_loaded_transaction_ )), ); { - let program_cache = bank.transaction_processor.program_cache.read().unwrap(); + let program_cache = bank + .transaction_processor + .global_program_cache + .read() + .unwrap(); let slot_versions = 
program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 1); assert_eq!(slot_versions[0].deployment_slot, bank.slot()); @@ -5992,7 +6158,11 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len(formalize_loaded_transaction_ )), ); { - let program_cache = bank.transaction_processor.program_cache.read().unwrap(); + let program_cache = bank + .transaction_processor + .global_program_cache + .read() + .unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&buffer_address); assert_eq!(slot_versions.len(), 1); assert_eq!(slot_versions[0].deployment_slot, bank.slot()); @@ -6094,7 +6264,11 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len(formalize_loaded_transaction_ let transaction = Transaction::new(&[&binding], invocation_message, bank.last_blockhash()); assert!(bank.process_transaction(&transaction).is_ok()); { - let program_cache = bank.transaction_processor.program_cache.read().unwrap(); + let program_cache = bank + .transaction_processor + .global_program_cache + .read() + .unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 2); assert_eq!(slot_versions[0].deployment_slot, bank.slot() - 1); @@ -6195,7 +6369,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len(formalize_loaded_transaction_ Some(&mint_keypair.pubkey()), ); assert_eq!( - TransactionError::InstructionError(0, InstructionError::NotEnoughAccountKeys), + TransactionError::InstructionError(0, InstructionError::MissingAccount), bank_client .send_and_confirm_message(&[&mint_keypair], message) .unwrap_err() @@ -6670,7 +6844,7 @@ fn test_compute_active_feature_set() { assert!(new_activations.contains(&test_feature)); // Actually activate the pending activation - bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true); + bank.compute_and_apply_new_feature_activations(); let feature = 
feature::from_account(&bank.get_account(&test_feature).expect("get_account")) .expect("from_account"); assert_eq!(feature.activated_at, Some(1)); @@ -6704,7 +6878,7 @@ fn test_reserved_account_keys() { &test_feature_id, &feature::create_account(&Feature::default(), 42), ); - bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true); + bank.compute_and_apply_new_feature_activations(); assert_eq!( bank.get_reserved_account_keys().len(), @@ -6743,16 +6917,16 @@ fn test_block_limits() { &feature_set::raise_block_limits_to_100m::id(), &feature::create_account(&Feature::default(), 42), ); - // apply_feature_activations for `FinishInit` will not cause the block limit to be updated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::FinishInit, true); + // compute_and_apply_features_after_snapshot_restore will not cause the block limit to be updated + bank.compute_and_apply_features_after_snapshot_restore(); assert_eq!( bank.read_cost_tracker().unwrap().get_block_limit(), MAX_BLOCK_UNITS, "before activating the feature, bank should have old/default limit" ); - // apply_feature_activations for `NewFromParent` will cause feature to be activated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true); + // compute_and_apply_new_feature_activations will cause feature to be activated + bank.compute_and_apply_new_feature_activations(); assert_eq!( bank.read_cost_tracker().unwrap().get_block_limit(), MAX_BLOCK_UNITS_SIMD_0286, @@ -6778,16 +6952,16 @@ fn test_block_limits() { &feature::create_account(&Feature::default(), 42), ); - // apply_feature_activations for `FinishInit` will not cause the block limit to be updated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::FinishInit, true); + // compute_and_apply_features_after_snapshot_restore will not cause the block limit to be updated + bank.compute_and_apply_features_after_snapshot_restore(); assert_eq!( 
bank.read_cost_tracker().unwrap().get_account_limit(), MAX_WRITABLE_ACCOUNT_UNITS, "before activating the feature, bank should have old/default limit" ); - // apply_feature_activations for `NewFromParent` will cause feature to be activated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true); + // compute_and_apply_new_feature_activations will cause feature to be activated + bank.compute_and_apply_new_feature_activations(); assert_eq!( bank.read_cost_tracker().unwrap().get_account_limit(), MAX_WRITABLE_ACCOUNT_UNITS_SIMD_0306_SECOND, @@ -6803,16 +6977,16 @@ fn test_block_limits() { &feature_set::raise_account_cu_limit::id(), &feature::create_account(&Feature::default(), 42), ); - // apply_feature_activations for `FinishInit` will not cause the block limit to be updated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::FinishInit, true); + // compute_and_apply_features_after_snapshot_restore will not cause the block limit to be updated + bank.compute_and_apply_features_after_snapshot_restore(); assert_eq!( bank.read_cost_tracker().unwrap().get_account_limit(), MAX_WRITABLE_ACCOUNT_UNITS, "before activating the feature, bank should have old/default limit" ); - // apply_feature_activations for `NewFromParent` will cause feature to be activated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true); + // compute_and_apply_new_feature_activations will cause feature to be activated + bank.compute_and_apply_new_feature_activations(); assert_eq!( bank.read_cost_tracker().unwrap().get_block_limit(), MAX_BLOCK_UNITS, @@ -6829,16 +7003,16 @@ fn test_block_limits() { &feature_set::raise_block_limits_to_100m::id(), &feature::create_account(&Feature::default(), 42), ); - // apply_feature_activations for `FinishInit` will not cause the block limit to be updated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::FinishInit, true); + // compute_and_apply_features_after_snapshot_restore will not 
cause the block limit to be updated + bank.compute_and_apply_features_after_snapshot_restore(); assert_eq!( bank.read_cost_tracker().unwrap().get_block_limit(), MAX_BLOCK_UNITS, "before activating the feature, bank should have old/default limit" ); - // apply_feature_activations for `NewFromParent` will cause feature to be activated - bank.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, true); + // compute_and_apply_new_feature_activations will cause feature to be activated + bank.compute_and_apply_new_feature_activations(); assert_eq!( bank.read_cost_tracker().unwrap().get_block_limit(), MAX_BLOCK_UNITS_SIMD_0286, @@ -6936,7 +7110,7 @@ fn min_rent_exempt_balance_for_sysvars(bank: &Bank, sysvar_ids: &[Pubkey]) -> u6 sysvar_ids .iter() .map(|sysvar_id| { - trace!("min_rent_excempt_balance_for_sysvars: {}", sysvar_id); + trace!("min_rent_excempt_balance_for_sysvars: {sysvar_id}"); bank.get_minimum_balance_for_rent_exemption( bank.get_account(sysvar_id).unwrap().data().len(), ) @@ -7188,7 +7362,6 @@ fn test_invoke_non_program_account_owned_by_a_builtin( ) { let (genesis_config, mint_keypair) = create_genesis_config(10000000); let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::remove_accounts_executable_flag_checks::id()); if formalize_loaded_transaction_data_size { bank.activate_feature(&feature_set::formalize_loaded_transaction_data_size::id()); } @@ -7229,8 +7402,7 @@ fn test_invoke_non_program_account_owned_by_a_builtin( #[test] fn test_debug_bank() { let (genesis_config, _mint_keypair) = create_genesis_config(50000); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.finish_init(&genesis_config, None, false); + let bank = Bank::new_for_tests(&genesis_config); let debug = format!("{bank:#?}"); assert!(!debug.is_empty()); } @@ -8057,14 +8229,12 @@ fn test_epoch_schedule_from_genesis_config() { genesis_config.epoch_schedule = EpochSchedule::custom(8192, 100, true); - let bank = 
Arc::new(Bank::new_with_paths( + let bank = Arc::new(Bank::new_from_genesis( &genesis_config, Arc::::default(), Vec::new(), None, - None, - false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, None, Arc::default(), @@ -8087,14 +8257,12 @@ where &validator_keypairs, vec![LAMPORTS_PER_SOL; 2], ); - let bank = Arc::new(Bank::new_with_paths( + let bank = Arc::new(Bank::new_from_genesis( &genesis_config, Arc::::default(), Vec::new(), None, - None, - false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, None, Arc::default(), @@ -8161,7 +8329,6 @@ fn test_vote_epoch_panic() { let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let vote_keypair = keypair_from_seed(&[1u8; 32]).unwrap(); - let stake_keypair = keypair_from_seed(&[2u8; 32]).unwrap(); let mut setup_ixs = Vec::new(); setup_ixs.extend(vote_instruction::create_account_with_config( @@ -8175,18 +8342,10 @@ fn test_vote_epoch_panic() { }, 1_000_000_000, vote_instruction::CreateVoteAccountConfig { - space: VoteStateVersions::vote_state_size_of(true) as u64, + space: VoteStateV3::size_of() as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, )); - setup_ixs.extend(stake_instruction::create_account_and_delegate_stake( - &mint_keypair.pubkey(), - &stake_keypair.pubkey(), - &vote_keypair.pubkey(), - &Authorized::auto(&mint_keypair.pubkey()), - &Lockup::default(), - 1_000_000_000_000, - )); setup_ixs.push(vote_instruction::withdraw( &vote_keypair.pubkey(), &mint_keypair.pubkey(), @@ -8200,7 +8359,7 @@ fn test_vote_epoch_panic() { )); let result = bank.process_transaction(&Transaction::new( - &[&mint_keypair, &vote_keypair, &stake_keypair], + &[&mint_keypair, &vote_keypair], Message::new(&setup_ixs, Some(&mint_keypair.pubkey())), bank.last_blockhash(), )); @@ -8407,11 +8566,11 @@ fn test_get_largest_accounts() { .iter() .cloned() .zip(vec![ - sol_to_lamports(2.0), - sol_to_lamports(3.0), - sol_to_lamports(3.0), - 
sol_to_lamports(4.0), - sol_to_lamports(5.0), + 2 * LAMPORTS_PER_SOL, + 3 * LAMPORTS_PER_SOL, + 3 * LAMPORTS_PER_SOL, + 4 * LAMPORTS_PER_SOL, + 5 * LAMPORTS_PER_SOL, ]) .collect(); @@ -8437,17 +8596,17 @@ fn test_get_largest_accounts() { assert_eq!( bank.get_largest_accounts(1, &pubkeys_hashset, AccountAddressFilter::Include, false) .unwrap(), - vec![(pubkeys[4], sol_to_lamports(5.0))] + vec![(pubkeys[4], 5 * LAMPORTS_PER_SOL)] ); assert_eq!( bank.get_largest_accounts(1, &HashSet::new(), AccountAddressFilter::Exclude, false) .unwrap(), - vec![(pubkeys[4], sol_to_lamports(5.0))] + vec![(pubkeys[4], 5 * LAMPORTS_PER_SOL)] ); assert_eq!( bank.get_largest_accounts(1, &exclude4, AccountAddressFilter::Exclude, false) .unwrap(), - vec![(pubkeys[3], sol_to_lamports(4.0))] + vec![(pubkeys[3], 4 * LAMPORTS_PER_SOL)] ); // Return all added accounts @@ -8527,8 +8686,8 @@ fn test_transfer_sysvar() { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; instruction_context - .try_borrow_instruction_account(transaction_context, 1)? - .set_data(vec![0; 40])?; + .try_borrow_instruction_account(1)? + .set_data_from_slice(&[0; 40])?; Ok(()) }); @@ -8607,7 +8766,7 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { //! 4. A key with zero lamports is in both an unrooted _and_ rooted bank (key5) //! 
- In this case, key5's ref-count should be decremented correctly - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + let (genesis_config, mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let amount = genesis_config.rent.minimum_balance(0); @@ -8659,43 +8818,24 @@ fn do_test_clean_dropped_unrooted_banks(freeze_bank1: FreezeBank1) { let expected_ref_count_for_cleaned_up_keys = 0; let expected_ref_count_for_keys_in_both_slot1_and_slot2 = 1; - assert_eq!( - bank2 - .rc - .accounts - .accounts_db - .accounts_index - .ref_count_from_storage(&key1.pubkey()), - expected_ref_count_for_cleaned_up_keys - ); - assert_ne!( - bank2 - .rc - .accounts - .accounts_db - .accounts_index - .ref_count_from_storage(&key3.pubkey()), - expected_ref_count_for_cleaned_up_keys - ); - assert_eq!( - bank2 - .rc - .accounts - .accounts_db - .accounts_index - .ref_count_from_storage(&key4.pubkey()), - expected_ref_count_for_cleaned_up_keys + bank2 + .rc + .accounts + .accounts_db + .assert_ref_count(&key1.pubkey(), expected_ref_count_for_cleaned_up_keys); + bank2.rc.accounts.accounts_db.assert_ref_count( + &key3.pubkey(), + expected_ref_count_for_keys_in_both_slot1_and_slot2, ); - assert_eq!( - bank2 - .rc - .accounts - .accounts_db - .accounts_index - .ref_count_from_storage(&key5.pubkey()), + bank2 + .rc + .accounts + .accounts_db + .assert_ref_count(&key4.pubkey(), expected_ref_count_for_cleaned_up_keys); + bank2.rc.accounts.accounts_db.assert_ref_count( + &key5.pubkey(), expected_ref_count_for_keys_in_both_slot1_and_slot2, ); - assert_eq!( bank2.rc.accounts.accounts_db.alive_account_count_in_slot(1), 0 @@ -9441,10 +9581,10 @@ declare_process_instruction!(MockTransferBuiltin, 1, |invoke_context| { match instruction { MockTransferInstruction::Transfer(amount) => { instruction_context - .try_borrow_instruction_account(transaction_context, 1)? 
+ .try_borrow_instruction_account(1)? .checked_sub_lamports(amount)?; instruction_context - .try_borrow_instruction_account(transaction_context, 2)? + .try_borrow_instruction_account(2)? .checked_add_lamports(amount)?; Ok(()) } @@ -9486,7 +9626,7 @@ fn test_invalid_rent_state_changes_existing_accounts() { mut genesis_config, mint_keypair, .. - } = create_genesis_config_with_leader(sol_to_lamports(100.), &Pubkey::new_unique(), 42); + } = create_genesis_config_with_leader(100 * LAMPORTS_PER_SOL, &Pubkey::new_unique(), 42); genesis_config.rent = Rent::default(); let mock_program_id = Pubkey::new_unique(); @@ -9594,7 +9734,7 @@ fn test_invalid_rent_state_changes_new_accounts() { mut genesis_config, mint_keypair, .. - } = create_genesis_config_with_leader(sol_to_lamports(100.), &Pubkey::new_unique(), 42); + } = create_genesis_config_with_leader(100 * LAMPORTS_PER_SOL, &Pubkey::new_unique(), 42); genesis_config.rent = Rent::default(); let mock_program_id = Pubkey::new_unique(); @@ -9648,7 +9788,7 @@ fn test_drained_created_account() { mut genesis_config, mint_keypair, .. - } = create_genesis_config_with_leader(sol_to_lamports(100.), &Pubkey::new_unique(), 42); + } = create_genesis_config_with_leader(100 * LAMPORTS_PER_SOL, &Pubkey::new_unique(), 42); genesis_config.rent = Rent::default(); activate_all_features(&mut genesis_config); @@ -9738,12 +9878,12 @@ fn test_rent_state_changes_sysvars() { mut genesis_config, mint_keypair, .. 
- } = create_genesis_config_with_leader(sol_to_lamports(100.), &Pubkey::new_unique(), 42); + } = create_genesis_config_with_leader(100 * LAMPORTS_PER_SOL, &Pubkey::new_unique(), 42); genesis_config.rent = Rent::default(); - let validator_pubkey = solana_pubkey::new_rand(); - let validator_stake_lamports = sol_to_lamports(1.); - let validator_staking_keypair = Keypair::new(); + let validator_pubkey = Pubkey::new_unique(); + let validator_stake_lamports = LAMPORTS_PER_SOL; + let validator_vote_account_pubkey = Pubkey::new_unique(); let validator_voting_keypair = Keypair::new(); let validator_vote_account = vote_state::create_account( @@ -9753,14 +9893,6 @@ fn test_rent_state_changes_sysvars() { validator_stake_lamports, ); - let validator_stake_account = stake_state::create_account( - &validator_staking_keypair.pubkey(), - &validator_voting_keypair.pubkey(), - &validator_vote_account, - &genesis_config.rent, - validator_stake_lamports, - ); - genesis_config.accounts.insert( validator_pubkey, Account::new( @@ -9770,11 +9902,7 @@ fn test_rent_state_changes_sysvars() { ), ); genesis_config.accounts.insert( - validator_staking_keypair.pubkey(), - Account::from(validator_stake_account), - ); - genesis_config.accounts.insert( - validator_voting_keypair.pubkey(), + validator_vote_account_pubkey, Account::from(validator_vote_account), ); @@ -9782,12 +9910,14 @@ fn test_rent_state_changes_sysvars() { // Ensure transactions with sysvars succeed, even though sysvars appear RentPaying by balance let tx = Transaction::new_signed_with_payer( - &[stake_instruction::deactivate_stake( - &validator_staking_keypair.pubkey(), - &validator_staking_keypair.pubkey(), + &[vote_instruction::authorize( + &validator_vote_account_pubkey, + &validator_voting_keypair.pubkey(), + &Pubkey::new_unique(), + VoteAuthorize::Voter, )], Some(&mint_keypair.pubkey()), - &[&mint_keypair, &validator_staking_keypair], + &[&mint_keypair, &validator_voting_keypair], bank.last_blockhash(), ); let result = 
bank.process_transaction(&tx); @@ -9800,7 +9930,7 @@ fn test_invalid_rent_state_changes_fee_payer() { mut genesis_config, mint_keypair, .. - } = create_genesis_config_with_leader(sol_to_lamports(100.), &Pubkey::new_unique(), 42); + } = create_genesis_config_with_leader(100 * LAMPORTS_PER_SOL, &Pubkey::new_unique(), 42); genesis_config.rent = Rent::default(); genesis_config.fee_rate_governor = FeeRateGovernor::new( solana_fee_calculator::DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE, @@ -9843,7 +9973,7 @@ fn test_invalid_rent_state_changes_fee_payer() { &[system_instruction::transfer( &rent_exempt_fee_payer.pubkey(), &recipient, - sol_to_lamports(1.), + LAMPORTS_PER_SOL, )], Some(&rent_exempt_fee_payer.pubkey()), &recent_blockhash, @@ -10046,7 +10176,7 @@ fn test_rent_state_incinerator() { mut genesis_config, mint_keypair, .. - } = create_genesis_config_with_leader(sol_to_lamports(100.), &Pubkey::new_unique(), 42); + } = create_genesis_config_with_leader(100 * LAMPORTS_PER_SOL, &Pubkey::new_unique(), 42); genesis_config.rent = Rent::default(); let rent_exempt_minimum = genesis_config.rent.minimum_balance(0); @@ -10118,28 +10248,28 @@ declare_process_instruction!(MockReallocBuiltin, 1, |invoke_context| { MockReallocInstruction::Realloc(new_size, new_balance, _) => { // Set data length instruction_context - .try_borrow_instruction_account(transaction_context, 1)? + .try_borrow_instruction_account(1)? .set_data_length(new_size)?; // set balance let current_balance = instruction_context - .try_borrow_instruction_account(transaction_context, 1)? + .try_borrow_instruction_account(1)? .get_lamports(); let diff_balance = (new_balance as i64).saturating_sub(current_balance as i64); let amount = diff_balance.unsigned_abs(); if diff_balance.is_positive() { instruction_context - .try_borrow_instruction_account(transaction_context, 0)? + .try_borrow_instruction_account(0)? .checked_sub_lamports(amount)?; instruction_context - .try_borrow_instruction_account(transaction_context, 1)? 
+ .try_borrow_instruction_account(1)? .set_lamports(new_balance)?; } else { instruction_context - .try_borrow_instruction_account(transaction_context, 0)? + .try_borrow_instruction_account(0)? .checked_add_lamports(amount)?; instruction_context - .try_borrow_instruction_account(transaction_context, 1)? + .try_borrow_instruction_account(1)? .set_lamports(new_balance)?; } Ok(()) @@ -10783,14 +10913,14 @@ fn test_feature_activation_loaded_programs_cache_preparation_phase( let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 16); let current_env = bank .transaction_processor - .program_cache + .global_program_cache .read() .unwrap() .get_environments_for_epoch(0) .program_runtime_v1; let upcoming_env = bank .transaction_processor - .program_cache + .global_program_cache .read() .unwrap() .get_environments_for_epoch(1) @@ -10798,7 +10928,11 @@ fn test_feature_activation_loaded_programs_cache_preparation_phase( // Advance the bank to recompile the program. { - let program_cache = bank.transaction_processor.program_cache.read().unwrap(); + let program_cache = bank + .transaction_processor + .global_program_cache + .read() + .unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 1); assert!(Arc::ptr_eq( @@ -10809,7 +10943,11 @@ fn test_feature_activation_loaded_programs_cache_preparation_phase( goto_end_of_slot(bank.clone()); let bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); { - let program_cache = bank.transaction_processor.program_cache.write().unwrap(); + let program_cache = bank + .transaction_processor + .global_program_cache + .write() + .unwrap(); let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 2); assert!(Arc::ptr_eq( @@ -10896,7 +11034,11 @@ fn test_feature_activation_loaded_programs_epoch_transition() { { // Prune for rerooting and thus finishing the 
recompilation phase. - let mut program_cache = bank.transaction_processor.program_cache.write().unwrap(); + let mut program_cache = bank + .transaction_processor + .global_program_cache + .write() + .unwrap(); program_cache.prune(bank.slot(), bank.epoch()); // Unload all (which is only the entry with the new environment) @@ -10911,7 +11053,7 @@ fn test_feature_activation_loaded_programs_epoch_transition() { } #[test] -fn test_verify_accounts_hash() { +fn test_verify_accounts() { let GenesisConfigInfo { mut genesis_config, mint_keypair: mint, @@ -10962,8 +11104,8 @@ fn test_verify_accounts_hash() { bank.squash(); bank.force_flush_accounts_cache(); - // ensure the accounts hash verifies - assert!(bank.verify_accounts_hash(VerifyAccountsHashConfig::default_for_test(), None)); + // ensure the accounts verify successfully + assert!(bank.verify_accounts(VerifyAccountsHashConfig::default_for_test(), None)); } #[test] @@ -10974,7 +11116,6 @@ fn test_squash_timing_add_assign() { squash_accounts_ms: 1, squash_accounts_cache_ms: 2, squash_accounts_index_ms: 3, - squash_accounts_store_ms: 4, squash_cache_ms: 5, }; @@ -10982,7 +11123,6 @@ fn test_squash_timing_add_assign() { squash_accounts_ms: 2, squash_accounts_cache_ms: 2 * 2, squash_accounts_index_ms: 3 * 2, - squash_accounts_store_ms: 4 * 2, squash_cache_ms: 5 * 2, }; @@ -10994,7 +11134,7 @@ fn test_squash_timing_add_assign() { #[test] fn test_system_instruction_allocate() { - let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(LAMPORTS_PER_SOL); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let bank_client = BankClient::new_shared(bank); let data_len = 2; @@ -11047,7 +11187,7 @@ where let program = Pubkey::new_unique(); let collector = Pubkey::new_unique(); - let mint_lamports = sol_to_lamports(1.0); + let mint_lamports = LAMPORTS_PER_SOL; let len1 = 123; let len2 = 456; @@ 
-11140,7 +11280,7 @@ fn test_create_zero_lamport_without_clean() { #[test] fn test_system_instruction_assign_with_seed() { - let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0)); + let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(LAMPORTS_PER_SOL); let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let bank_client = BankClient::new_shared(bank); @@ -11175,7 +11315,7 @@ fn test_system_instruction_assign_with_seed() { #[test] fn test_system_instruction_unsigned_transaction() { - let (genesis_config, alice_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0)); + let (genesis_config, alice_keypair) = create_genesis_config_no_tx_fee(LAMPORTS_PER_SOL); let alice_pubkey = alice_keypair.pubkey(); let mallory_keypair = Keypair::new(); let mallory_pubkey = mallory_keypair.pubkey(); @@ -11208,14 +11348,14 @@ fn test_system_instruction_unsigned_transaction() { ); assert_eq!( bank_client.get_balance(&alice_pubkey).unwrap(), - sol_to_lamports(1.0) - amount + LAMPORTS_PER_SOL - amount ); assert_eq!(bank_client.get_balance(&mallory_pubkey).unwrap(), amount); } #[test] fn test_calc_vote_accounts_to_store_empty() { - let vote_account_rewards = DashMap::default(); + let vote_account_rewards = HashMap::default(); let result = Bank::calc_vote_accounts_to_store(vote_account_rewards); assert_eq!( result.accounts_with_rewards.len(), @@ -11226,7 +11366,7 @@ fn test_calc_vote_accounts_to_store_empty() { #[test] fn test_calc_vote_accounts_to_store_overflow() { - let vote_account_rewards = DashMap::default(); + let mut vote_account_rewards = HashMap::default(); let pubkey = solana_pubkey::new_rand(); let mut vote_account = AccountSharedData::default(); vote_account.set_lamports(u64::MAX); @@ -11251,7 +11391,7 @@ fn test_calc_vote_accounts_to_store_normal() { let pubkey = solana_pubkey::new_rand(); for commission in 0..2 { for vote_rewards in 0..2 { - let vote_account_rewards = DashMap::default(); 
+ let mut vote_account_rewards = HashMap::default(); let mut vote_account = AccountSharedData::default(); vote_account.set_lamports(1); vote_account_rewards.insert( @@ -11435,6 +11575,7 @@ fn test_failed_simulation_load_error() { let transaction = Transaction::new(&[&mint_keypair], message, bank.last_blockhash()); bank.freeze(); + let mint_balance = bank.get_account(&mint_keypair.pubkey()).unwrap().lamports(); let sanitized = RuntimeTransaction::from_transaction_for_tests(transaction); let simulation = bank.simulate_transaction(&sanitized, false); assert_eq!( @@ -11447,6 +11588,11 @@ fn test_failed_simulation_load_error() { loaded_accounts_data_size: 0, return_data: None, inner_instructions: None, + fee: Some(0), + pre_balances: Some(vec![mint_balance, 0]), + post_balances: Some(vec![mint_balance, 0]), + pre_token_balances: Some(vec![]), + post_token_balances: Some(vec![]), } ); } @@ -11518,11 +11664,7 @@ fn test_deploy_last_epoch_slot() { &mut genesis_config, agave_feature_set::enable_loader_v4::id(), ); - genesis_config - .accounts - .remove(&feature_set::remove_accounts_executable_flag_checks::id()); - let mut bank = Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::remove_accounts_executable_flag_checks::id()); + let bank = Bank::new_for_tests(&genesis_config); // go to the last slot in the epoch let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); @@ -11595,7 +11737,7 @@ fn test_deploy_last_epoch_slot() { let signers = &[&payer_keypair, &upgrade_authority_keypair]; let transaction = Transaction::new(signers, message.clone(), bank.last_blockhash()); let ret = bank.process_transaction(&transaction); - assert!(ret.is_ok(), "ret: {:?}", ret); + assert!(ret.is_ok(), "ret: {ret:?}"); goto_end_of_slot(bank.clone()); // go to the first slot in the new epoch @@ -11624,7 +11766,6 @@ fn test_loader_v3_to_v4_migration(formalize_loaded_transaction_data_size: bool) agave_feature_set::enable_loader_v4::id(), ); let mut bank = 
Bank::new_for_tests(&genesis_config); - bank.activate_feature(&feature_set::remove_accounts_executable_flag_checks::id()); if formalize_loaded_transaction_data_size { bank.activate_feature(&feature_set::formalize_loaded_transaction_data_size::id()); } @@ -11819,10 +11960,7 @@ fn test_loader_v3_to_v4_migration(formalize_loaded_transaction_data_size: bool) AccountMeta::new_readonly(upgrade_authority_keypair.pubkey(), true), ]; for (instruction_accounts, expected_error) in [ - ( - case_too_few_accounts, - InstructionError::NotEnoughAccountKeys, - ), + (case_too_few_accounts, InstructionError::MissingAccount), (case_readonly_programdata, InstructionError::InvalidArgument), ( case_incorrect_authority, @@ -11912,7 +12050,7 @@ fn test_loader_v3_to_v4_migration(formalize_loaded_transaction_data_size: bool) bank.store_account(&programdata_address, &programdata_account); bank.store_account(&payer_keypair.pubkey(), &payer_account); let result = bank.process_transaction(&transaction); - assert!(result.is_ok(), "result: {:?}", result); + assert!(result.is_ok(), "result: {result:?}"); goto_end_of_slot(bank.clone()); let bank = @@ -12223,3 +12361,68 @@ fn test_should_use_vote_keyed_leader_schedule() { } } } + +#[test] +fn test_apply_builtin_program_feature_transitions_for_new_epoch() { + let (genesis_config, _mint_keypair) = create_genesis_config(100_000); + + let mut bank = Bank::new_for_tests(&genesis_config); + bank.feature_set = Arc::new(FeatureSet::all_enabled()); + bank.compute_and_apply_genesis_features(); + + // Overwrite precompile accounts to simulate a cluster which already added precompiles. 
+ for precompile in get_precompiles() { + bank.store_account(&precompile.program_id, &AccountSharedData::default()); + // Simulate cluster which added ed25519 precompile with a system program owner + if precompile.program_id == ed25519_program::id() { + bank.add_precompiled_account_with_owner( + &precompile.program_id, + solana_system_interface::program::id(), + ); + } else { + bank.add_precompiled_account(&precompile.program_id); + } + } + + // Normally feature transitions are applied to a bank that hasn't been + // frozen yet. Freeze the bank early to ensure that no account changes + // are made. + bank.freeze(); + + // Simulate crossing an epoch boundary for a new bank + bank.compute_and_apply_new_feature_activations(); +} + +#[test] +fn test_startup_from_snapshot_after_precompile_transition() { + let (genesis_config, _mint_keypair) = create_genesis_config(100_000); + + let mut bank = Bank::new_for_tests(&genesis_config); + bank.feature_set = Arc::new(FeatureSet::all_enabled()); + bank.compute_and_apply_genesis_features(); + + // Overwrite precompile accounts to simulate a cluster which already added precompiles. + for precompile in get_precompiles() { + bank.store_account(&precompile.program_id, &AccountSharedData::default()); + bank.add_precompiled_account(&precompile.program_id); + } + + bank.freeze(); + + // Simulate starting up from snapshot finishing the initialization for a frozen bank + bank.compute_and_apply_features_after_snapshot_restore(); +} + +#[test] +fn test_parent_block_id() { + // Setup parent bank and populate block ID. + let (genesis_config, _mint_keypair) = create_genesis_config(100_000); + let parent_bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let parent_block_id = Some(Hash::new_unique()); + parent_bank.set_block_id(parent_block_id); + + // Create child from parent and ensure parent block ID links back to the + // expected value. 
+ let child_bank = Bank::new_from_parent(parent_bank, &Pubkey::new_unique(), 1); + assert_eq!(parent_block_id, child_bank.parent_block_id()); +} diff --git a/runtime/src/bank_client.rs b/runtime/src/bank_client.rs index f36adaa7a4fc2f..24fd38a783893f 100644 --- a/runtime/src/bank_client.rs +++ b/runtime/src/bank_client.rs @@ -13,8 +13,7 @@ use { solana_signature::Signature, solana_signer::{signers::Signers, Signer}, solana_system_interface::instruction as system_instruction, - solana_sysvar::Sysvar, - solana_sysvar_id::SysvarId, + solana_sysvar::SysvarSerialize, solana_transaction::{versioned::VersionedTransaction, Transaction}, solana_transaction_error::{TransportError, TransportResult as Result}, std::{ @@ -162,7 +161,11 @@ impl SyncClient for BankClient { min_confirmed_blocks: usize, ) -> Result { // https://github.com/solana-labs/solana/issues/7199 - assert_eq!(min_confirmed_blocks, 1, "BankClient cannot observe the passage of multiple blocks, so min_confirmed_blocks must be 1"); + assert_eq!( + min_confirmed_blocks, 1, + "BankClient cannot observe the passage of multiple blocks, so min_confirmed_blocks \ + must be 1" + ); let now = Instant::now(); let confirmed_blocks; loop { @@ -268,7 +271,7 @@ impl BankClient { Self::new_shared(Arc::new(bank)) } - pub fn set_sysvar_for_tests(&self, sysvar: &T) { + pub fn set_sysvar_for_tests(&self, sysvar: &T) { self.bank.set_sysvar_for_tests(sysvar); } @@ -302,12 +305,12 @@ impl BankClient { mod tests { use { super::*, solana_genesis_config::create_genesis_config, solana_instruction::AccountMeta, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, }; #[test] fn test_bank_client_new_with_keypairs() { - let (genesis_config, john_doe_keypair) = create_genesis_config(sol_to_lamports(1.0)); + let (genesis_config, john_doe_keypair) = create_genesis_config(LAMPORTS_PER_SOL); let john_pubkey = john_doe_keypair.pubkey(); let jane_doe_keypair = Keypair::new(); let jane_pubkey = jane_doe_keypair.pubkey(); 
diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 94f88d0aefd6ff..bf2e79adc80c28 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -44,17 +44,33 @@ impl ReadOnlyAtomicSlot { } } +/// Convenience type since often root/working banks are fetched together. #[derive(Clone)] -pub struct SharableBank(Arc>); +pub struct SharableBanks { + root_bank: Arc>, + working_bank: Arc>, +} -impl SharableBank { - pub fn load(&self) -> Arc { - self.0.load_full() +impl SharableBanks { + pub fn root(&self) -> Arc { + self.root_bank.load_full() } - fn store(&self, bank: Arc) { - self.0.store(bank); + pub fn working(&self) -> Arc { + self.working_bank.load_full() } + + pub fn load(&self) -> BankPair { + BankPair { + root_bank: self.root(), + working_bank: self.working(), + } + } +} + +pub struct BankPair { + pub root_bank: Arc, + pub working_bank: Arc, } #[derive(Error, Debug)] @@ -83,7 +99,8 @@ pub struct BankForks { banks: HashMap, descendants: HashMap>, root: Arc, - root_bank: SharableBank, + working_slot: Slot, + sharable_banks: SharableBanks, in_vote_only_mode: Arc, highest_slot_at_startup: Slot, scheduler_pool: Option, @@ -130,7 +147,13 @@ impl BankForks { let bank_forks = Arc::new(RwLock::new(Self { root: Arc::new(AtomicSlot::new(root_slot)), - root_bank: SharableBank(Arc::new(ArcSwap::from(Arc::clone(&root_bank)))), + working_slot: root_slot, + sharable_banks: SharableBanks { + root_bank: Arc::new(ArcSwap::from(root_bank.clone())), + // working bank is initially the same as root - all banks are either the root + // or its ancestors. 
+ working_bank: Arc::new(ArcSwap::from(root_bank.clone())), + }, banks, descendants, in_vote_only_mode: Arc::new(AtomicBool::new(false)), @@ -215,16 +238,16 @@ impl BankForks { self.get(slot).map(|bank| bank.hash()) } - pub fn sharable_root_bank(&self) -> SharableBank { - self.root_bank.clone() + pub fn sharable_banks(&self) -> SharableBanks { + self.sharable_banks.clone() } pub fn root_bank(&self) -> Arc { - self.root_bank.load() + self.sharable_banks.root() } pub fn install_scheduler_pool(&mut self, pool: InstalledSchedulerPoolArc) { - info!("Installed new scheduler_pool into bank_forks: {:?}", pool); + info!("Installed new scheduler_pool into bank_forks: {pool:?}"); assert!( self.scheduler_pool.replace(pool).is_none(), "Reinstalling scheduler pool isn't supported" @@ -257,6 +280,11 @@ impl BankForks { for parent in bank.proper_ancestors() { self.descendants.entry(parent).or_default().insert(slot); } + + // Update sharable working bank and cached slot. + self.working_slot = self.find_highest_slot(); + self.sharable_banks.working_bank.store(self.working_bank()); + bank } @@ -300,15 +328,25 @@ impl BankForks { if entry.get().is_empty() { entry.remove_entry(); } + + // Update sharable working bank and cached slot. + // The previous working bank (highest slot) may have been removed. 
+ self.working_slot = self.find_highest_slot(); + self.sharable_banks.working_bank.store(self.working_bank()); + Some(bank) } pub fn highest_slot(&self) -> Slot { + self.working_slot + } + + fn find_highest_slot(&self) -> Slot { self.banks.values().map(|bank| bank.slot()).max().unwrap() } pub fn working_bank(&self) -> Arc { - self[self.highest_slot()].clone() + self.banks[&self.highest_slot()].clone_without_scheduler() } pub fn working_bank_with_scheduler(&self) -> BankWithScheduler { @@ -356,7 +394,7 @@ impl BankForks { snapshot_controller: Option<&SnapshotController>, highest_super_majority_root: Option, ) -> Result<(Vec, SetRootMetrics), SetRootError> { - let old_epoch = self.root_bank.load().epoch(); + let old_epoch = self.sharable_banks.root().epoch(); let root_bank = &self .get(root) @@ -366,7 +404,7 @@ impl BankForks { // BankForks first *and* from a different thread, this store *must* be at least Release to // ensure atomic ordering correctness. self.root.store(root, Ordering::Release); - self.root_bank.store(Arc::clone(root_bank)); + self.sharable_banks.root_bank.store(Arc::clone(root_bank)); let new_epoch = root_bank.epoch(); if old_epoch != new_epoch { @@ -383,7 +421,8 @@ impl BankForks { // Now we have rooted a bank in a new epoch, there are no needs to // keep the epoch rewards cache for current epoch any longer. 
info!( - "Clearing epoch rewards cache for epoch {old_epoch} after setting root to slot {root}" + "Clearing epoch rewards cache for epoch {old_epoch} after setting root to slot \ + {root}" ); root_bank.clear_epoch_rewards_cache(); } @@ -500,14 +539,6 @@ impl BankForks { .squash_accounts_cache_ms, i64 ), - ( - "total_squash_accounts_store_ms", - set_root_metrics - .timings - .total_squash_time - .squash_accounts_store_ms, - i64 - ), ( "total_snapshot_ms", set_root_metrics.timings.total_snapshot_ms, diff --git a/runtime/src/bank_hash_cache.rs b/runtime/src/bank_hash_cache.rs index 3002f68d90baa3..856f118a11eb32 100644 --- a/runtime/src/bank_hash_cache.rs +++ b/runtime/src/bank_hash_cache.rs @@ -10,7 +10,7 @@ use { crate::{ bank::Bank, - bank_forks::{BankForks, SharableBank}, + bank_forks::{BankForks, SharableBanks}, }, solana_clock::Slot, solana_hash::Hash, @@ -26,14 +26,14 @@ pub type DumpedSlotSubscription = Arc>; pub struct BankHashCache { hashes: BTreeMap, bank_forks: Arc>, - root_bank: SharableBank, + sharable_banks: SharableBanks, last_root: Slot, dumped_slot_subscription: DumpedSlotSubscription, } impl BankHashCache { pub fn new(bank_forks: Arc>) -> Self { - let root_bank = bank_forks.read().unwrap().sharable_root_bank(); + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); let dumped_slot_subscription = DumpedSlotSubscription::default(); bank_forks .write() @@ -42,7 +42,7 @@ impl BankHashCache { Self { hashes: BTreeMap::default(), bank_forks, - root_bank, + sharable_banks, last_root: 0, dumped_slot_subscription, } @@ -82,8 +82,8 @@ impl BankHashCache { let prev_hash = self.hashes.insert(slot, hash); debug_assert!( prev_hash.is_none(), - "Programmer error, this indicates we have dumped and replayed \ - a block however the cache was not invalidated" + "Programmer error, this indicates we have dumped and replayed a block however the \ + cache was not invalidated" ); Some(hash) } @@ -94,7 +94,7 @@ impl BankHashCache { /// Returns the root bank and 
also prunes cache of any slots < root pub fn get_root_bank_and_prune_cache(&mut self) -> Arc { - let root_bank = self.root_bank.load(); + let root_bank = self.sharable_banks.root(); if root_bank.slot() != self.last_root { self.last_root = root_bank.slot(); self.hashes = self.hashes.split_off(&self.last_root); diff --git a/runtime/src/dependency_tracker.rs b/runtime/src/dependency_tracker.rs index f8418952aa3d34..a1bca8e15d9f53 100644 --- a/runtime/src/dependency_tracker.rs +++ b/runtime/src/dependency_tracker.rs @@ -4,10 +4,10 @@ use std::sync::{atomic::AtomicU64, Condvar, Mutex}; #[derive(Debug, Default)] pub struct DependencyTracker { - /// The current work sequence number - work_sequence: AtomicU64, - /// The processed work sequence number, if it is None, no work has been processed - processed_work_sequence: Mutex>, + /// The current work id + work_id: AtomicU64, + /// The processed work id, if it is None, no work has been processed + processed_work_id: Mutex>, condvar: Condvar, } @@ -16,40 +16,40 @@ fn less_than(a: &Option, b: u64) -> bool { } impl DependencyTracker { - /// Acquire the next work sequence number. - /// The sequence starts from 0 and increments by 1 each time it is called. + /// Acquire the next work id number. + /// The work id starts from 0 and increments by 1 each time it is called. pub fn declare_work(&self) -> u64 { - self.work_sequence + self.work_id .fetch_add(1, std::sync::atomic::Ordering::SeqCst) + 1 } - /// Notify all waiting threads that a work has occurred with the given sequence number. - /// This function will update the work sequence and notify all waiting threads only if the work - /// sequence is greater than the work sequence. Notify a work of sequence number 's' will - /// implicitly imply that all work with sequence number less than 's' have been processed. 
- pub fn mark_this_and_all_previous_work_processed(&self, sequence: u64) { - let mut work_sequence = self.processed_work_sequence.lock().unwrap(); - if less_than(&work_sequence, sequence) { - *work_sequence = Some(sequence); + /// Notify all waiting threads that a work has been processed with the given work id. + /// This function will update the processed work id and notify all waiting threads only if the work + /// id is greater than the procsessed work id. Notify a work of id number 's' will + /// implicitly imply that all work with id number less than 's' have been processed. + pub fn mark_this_and_all_previous_work_processed(&self, work_id: u64) { + let mut processed_work_id = self.processed_work_id.lock().unwrap(); + if less_than(&processed_work_id, work_id) { + *processed_work_id = Some(work_id); self.condvar.notify_all(); } } - /// To wait for the dependency work with 'sequence' to be processed. - pub fn wait_for_dependency(&self, sequence: u64) { - if sequence == 0 { - return; // No need to wait for sequence 0 as real work starts from 1. + /// To wait for the dependency work with 'work_id' to be processed. + pub fn wait_for_dependency(&self, work_id: u64) { + if work_id == 0 { + return; // No need to wait for work id 0 as real work starts from 1. } - let mut processed_sequence = self.processed_work_sequence.lock().unwrap(); - while less_than(&processed_sequence, sequence) { - processed_sequence = self.condvar.wait(processed_sequence).unwrap(); + let mut processed_work_id = self.processed_work_id.lock().unwrap(); + while less_than(&processed_work_id, work_id) { + processed_work_id = self.condvar.wait(processed_work_id).unwrap(); } } - /// Get the current work sequence number. + /// Get the current work id number. 
pub fn get_current_declared_work(&self) -> u64 { - self.work_sequence.load(std::sync::atomic::Ordering::SeqCst) + self.work_id.load(std::sync::atomic::Ordering::SeqCst) } } @@ -69,7 +69,7 @@ mod tests { } #[test] - fn test_get_new_work_sequence() { + fn test_get_new_work_id() { let dependency_tracker = DependencyTracker::default(); assert_eq!(dependency_tracker.declare_work(), 1); assert_eq!(dependency_tracker.declare_work(), 2); @@ -81,21 +81,21 @@ mod tests { let dependency_tracker = DependencyTracker::default(); dependency_tracker.mark_this_and_all_previous_work_processed(1); - let processed_sequence = *dependency_tracker.processed_work_sequence.lock().unwrap(); - assert_eq!(processed_sequence, Some(1)); + let processed_work_id = *dependency_tracker.processed_work_id.lock().unwrap(); + assert_eq!(processed_work_id, Some(1)); - // notify a smaller sequence number, should not change the processed sequence + // notify a smaller work id number, should not change the processed work id dependency_tracker.mark_this_and_all_previous_work_processed(0); - let processed_sequence = *dependency_tracker.processed_work_sequence.lock().unwrap(); - assert_eq!(processed_sequence, Some(1)); - // notify a larger sequence number, should change the processed sequence + let processed_work_id = *dependency_tracker.processed_work_id.lock().unwrap(); + assert_eq!(processed_work_id, Some(1)); + // notify a larger work id number, should change the processed work id dependency_tracker.mark_this_and_all_previous_work_processed(2); - let processed_sequence = *dependency_tracker.processed_work_sequence.lock().unwrap(); - assert_eq!(processed_sequence, Some(2)); - // notify the same sequence number, should not change the processed sequence + let processed_work_id = *dependency_tracker.processed_work_id.lock().unwrap(); + assert_eq!(processed_work_id, Some(2)); + // notify the same work id number, should not change the processed work id 
dependency_tracker.mark_this_and_all_previous_work_processed(2); - let processed_sequence = *dependency_tracker.processed_work_sequence.lock().unwrap(); - assert_eq!(processed_sequence, Some(2)); + let processed_work_id = *dependency_tracker.processed_work_id.lock().unwrap(); + assert_eq!(processed_work_id, Some(2)); } #[test] @@ -116,7 +116,7 @@ mod tests { dependency_tracker.mark_this_and_all_previous_work_processed(work); handle.join().unwrap(); - let processed_sequence = *dependency_tracker.processed_work_sequence.lock().unwrap(); - assert_eq!(processed_sequence, Some(2)); + let processed_work_id = *dependency_tracker.processed_work_id.lock().unwrap(); + assert_eq!(processed_work_id, Some(2)); } } diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index c61fa0304f1756..25039bce66ee66 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -1,15 +1,81 @@ use { crate::stakes::SerdeStakesToStakeFormat, serde::{Deserialize, Serialize}, + solana_bls_signatures::{Pubkey as BLSPubkey, PubkeyCompressed as BLSPubkeyCompressed}, solana_clock::Epoch, solana_pubkey::Pubkey, solana_vote::vote_account::VoteAccountsHashMap, - std::{collections::HashMap, sync::Arc}, + std::{ + collections::HashMap, + sync::{Arc, OnceLock}, + }, }; pub type NodeIdToVoteAccounts = HashMap; pub type EpochAuthorizedVoters = HashMap; +#[derive(Clone, Debug, Default)] +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[cfg_attr(feature = "dev-context-only-utils", derive(PartialEq))] +pub struct BLSPubkeyToRankMap { + rank_map: HashMap, + //TODO(wen): We can make SortedPubkeys a Vec after we remove ed25519 + // pubkey from certificate pool. 
+ sorted_pubkeys: Vec<(Pubkey, BLSPubkey)>, +} + +impl BLSPubkeyToRankMap { + pub fn new(epoch_vote_accounts_hash_map: &VoteAccountsHashMap) -> Self { + let mut pubkey_stake_pair_vec: Vec<(Pubkey, BLSPubkey, u64)> = epoch_vote_accounts_hash_map + .iter() + .filter_map(|(pubkey, (stake, account))| { + if *stake > 0 { + account + .vote_state_view() + .bls_pubkey_compressed() + .and_then(|bls_pubkey_compressed_bytes| { + let bls_pubkey_compressed = + BLSPubkeyCompressed(bls_pubkey_compressed_bytes); + BLSPubkey::try_from(bls_pubkey_compressed).ok() + }) + .map(|bls_pubkey| (*pubkey, bls_pubkey, *stake)) + } else { + None + } + }) + .collect(); + pubkey_stake_pair_vec.sort_by(|(_, a_pubkey, a_stake), (_, b_pubkey, b_stake)| { + b_stake.cmp(a_stake).then(a_pubkey.cmp(b_pubkey)) + }); + let mut sorted_pubkeys = Vec::new(); + let mut bls_pubkey_to_rank_map = HashMap::new(); + for (rank, (pubkey, bls_pubkey, _stake)) in pubkey_stake_pair_vec.into_iter().enumerate() { + sorted_pubkeys.push((pubkey, bls_pubkey)); + bls_pubkey_to_rank_map.insert(bls_pubkey, rank as u16); + } + Self { + rank_map: bls_pubkey_to_rank_map, + sorted_pubkeys, + } + } + + pub fn is_empty(&self) -> bool { + self.rank_map.is_empty() + } + + pub fn len(&self) -> usize { + self.rank_map.len() + } + + pub fn get_rank(&self, bls_pubkey: &BLSPubkey) -> Option<&u16> { + self.rank_map.get(bls_pubkey) + } + + pub fn get_pubkey(&self, index: usize) -> Option<&(Pubkey, BLSPubkey)> { + self.sorted_pubkeys.get(index) + } +} + #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Clone, Serialize, Debug, Deserialize, Default, PartialEq, Eq)] pub struct NodeVoteAccounts { @@ -26,6 +92,8 @@ pub enum VersionedEpochStakes { total_stake: u64, node_id_to_vote_accounts: Arc, epoch_authorized_voters: Arc, + #[serde(skip)] + bls_pubkey_to_rank_map: OnceLock>, }, } @@ -39,6 +107,7 @@ impl VersionedEpochStakes { total_stake, node_id_to_vote_accounts: Arc::new(node_id_to_vote_accounts), epoch_authorized_voters: 
Arc::new(epoch_authorized_voters), + bls_pubkey_to_rank_map: OnceLock::new(), } } @@ -105,6 +174,19 @@ impl VersionedEpochStakes { } } + pub fn bls_pubkey_to_rank_map(&self) -> &Arc { + match self { + Self::Current { + bls_pubkey_to_rank_map, + .. + } => bls_pubkey_to_rank_map.get_or_init(|| { + Arc::new(BLSPubkeyToRankMap::new( + self.stakes().vote_accounts().as_ref(), + )) + }), + } + } + pub fn vote_account_stake(&self, vote_account: &Pubkey) -> u64 { self.stakes() .vote_accounts() @@ -156,8 +238,15 @@ impl VersionedEpochStakes { #[cfg(test)] pub(crate) mod tests { use { - super::*, solana_account::AccountSharedData, solana_vote::vote_account::VoteAccount, - solana_vote_program::vote_state::create_account_with_authorized, std::iter, + super::*, + solana_account::AccountSharedData, + solana_bls_signatures::keypair::Keypair as BLSKeypair, + solana_vote::vote_account::VoteAccount, + solana_vote_program::vote_state::{ + create_account_with_authorized, create_v4_account_with_authorized, + }, + std::iter, + test_case::test_case, }; struct VoteAccountInfo { @@ -169,6 +258,7 @@ pub(crate) mod tests { fn new_vote_accounts( num_nodes: usize, num_vote_accounts_per_node: usize, + is_alpenglow: bool, ) -> HashMap> { // Create some vote accounts for each pubkey (0..num_nodes) @@ -178,15 +268,35 @@ pub(crate) mod tests { node_id, iter::repeat_with(|| { let authorized_voter = solana_pubkey::new_rand(); - VoteAccountInfo { - vote_account: solana_pubkey::new_rand(), - account: create_account_with_authorized( + let bls_pubkey_compressed: BLSPubkeyCompressed = + BLSKeypair::new().public.try_into().unwrap(); + let bls_pubkey_compressed_serialized = + bincode::serialize(&bls_pubkey_compressed) + .unwrap() + .try_into() + .unwrap(); + + let account = if is_alpenglow { + create_v4_account_with_authorized( &node_id, &authorized_voter, &node_id, + Some(bls_pubkey_compressed_serialized), 0, 100, - ), + ) + } else { + create_account_with_authorized( + &node_id, + &authorized_voter, + 
&node_id, + 0, + 100, + ) + }; + VoteAccountInfo { + vote_account: solana_pubkey::new_rand(), + account, authorized_voter, } }) @@ -213,13 +323,15 @@ pub(crate) mod tests { .collect() } - #[test] - fn test_parse_epoch_vote_accounts() { + #[test_case(true; "alpenglow")] + #[test_case(false; "towerbft")] + fn test_parse_epoch_vote_accounts(is_alpenglow: bool) { let stake_per_account = 100; let num_vote_accounts_per_node = 2; let num_nodes = 10; - let vote_accounts_map = new_vote_accounts(num_nodes, num_vote_accounts_per_node); + let vote_accounts_map = + new_vote_accounts(num_nodes, num_vote_accounts_per_node, is_alpenglow); let expected_authorized_voters: HashMap<_, _> = vote_accounts_map .iter() @@ -275,12 +387,14 @@ pub(crate) mod tests { ); } - #[test] - fn test_node_id_to_stake() { + #[test_case(true; "alpenglow")] + #[test_case(false; "towerbft")] + fn test_node_id_to_stake(is_alpenglow: bool) { let num_nodes = 10; let num_vote_accounts_per_node = 2; - let vote_accounts_map = new_vote_accounts(num_nodes, num_vote_accounts_per_node); + let vote_accounts_map = + new_vote_accounts(num_nodes, num_vote_accounts_per_node, is_alpenglow); let node_id_to_stake_map = vote_accounts_map .keys() .enumerate() @@ -299,4 +413,48 @@ pub(crate) mod tests { ); } } + + #[test_case(1; "single_vote_account")] + #[test_case(2; "multiple_vote_accounts")] + fn test_bls_pubkey_rank_map(num_vote_accounts_per_node: usize) { + solana_logger::setup(); + let num_nodes = 10; + let num_vote_accounts = num_nodes * num_vote_accounts_per_node; + + let vote_accounts_map = new_vote_accounts(num_nodes, num_vote_accounts_per_node, true); + let node_id_to_stake_map = vote_accounts_map + .keys() + .enumerate() + .map(|(index, node_id)| (*node_id, ((index + 1) * 100) as u64)) + .collect::>(); + let epoch_vote_accounts = new_epoch_vote_accounts(&vote_accounts_map, |node_id| { + *node_id_to_stake_map.get(node_id).unwrap() + }); + let epoch_stakes = 
VersionedEpochStakes::new_for_tests(epoch_vote_accounts.clone(), 0); + let bls_pubkey_to_rank_map = epoch_stakes.bls_pubkey_to_rank_map(); + assert_eq!(bls_pubkey_to_rank_map.len(), num_vote_accounts); + for (pubkey, (_, vote_account)) in epoch_vote_accounts { + let vote_state_view = vote_account.vote_state_view(); + let bls_pubkey_compressed = bincode::deserialize::( + &vote_state_view.bls_pubkey_compressed().unwrap(), + ) + .unwrap(); + let bls_pubkey = BLSPubkey::try_from(bls_pubkey_compressed).unwrap(); + let index = bls_pubkey_to_rank_map.get_rank(&bls_pubkey).unwrap(); + assert!(index >= &0 && index < &(num_vote_accounts as u16)); + assert_eq!( + bls_pubkey_to_rank_map.get_pubkey(*index as usize), + Some(&(pubkey, bls_pubkey)) + ); + } + + // Convert it to versioned and back, we should get the same rank map + let mut bank_epoch_stakes = HashMap::new(); + bank_epoch_stakes.insert(0, epoch_stakes.clone()); + let epoch_stakes = bank_epoch_stakes + .get(&0) + .expect("Epoch stakes should exist"); + let bls_pubkey_to_rank_map2 = epoch_stakes.bls_pubkey_to_rank_map(); + assert_eq!(bls_pubkey_to_rank_map2, bls_pubkey_to_rank_map); + } } diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index a2cf453b13375f..31c24b3906686a 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -2,11 +2,12 @@ use { agave_feature_set::{FeatureSet, FEATURE_NAMES}, log::*, solana_account::{Account, AccountSharedData}, + solana_cluster_type::ClusterType, solana_feature_gate_interface::{self as feature, Feature}, solana_fee_calculator::FeeRateGovernor, - solana_genesis_config::{ClusterType, GenesisConfig}, + solana_genesis_config::GenesisConfig, solana_keypair::Keypair, - solana_native_token::sol_to_lamports, + solana_native_token::LAMPORTS_PER_SOL, solana_pubkey::Pubkey, solana_rent::Rent, solana_seed_derivable::SeedDerivable, @@ -28,7 +29,7 @@ pub fn bootstrap_validator_stake_lamports() -> u64 { // Number of lamports automatically used for 
genesis accounts pub const fn genesis_sysvar_and_builtin_program_lamports() -> u64 { - const NUM_BUILTIN_PROGRAMS: u64 = 7; + const NUM_BUILTIN_PROGRAMS: u64 = 6; const NUM_PRECOMPILES: u64 = 2; const STAKE_HISTORY_MIN_BALANCE: u64 = 114_979_200; const CLOCK_SYSVAR_MIN_BALANCE: u64 = 1_169_280; @@ -246,8 +247,8 @@ pub fn deactivate_features( genesis_config.accounts.remove(deactivate_feature_pk); } else { warn!( - "Feature {:?} set for deactivation is not a known Feature public key", - deactivate_feature_pk + "Feature {deactivate_feature_pk:?} set for deactivation is not a known Feature \ + public key" ); } } @@ -308,7 +309,7 @@ pub fn create_genesis_config_with_leader_ex_no_features( let native_mint_account = solana_account::AccountSharedData::from(Account { owner: spl_generic_token::token::id(), data: spl_generic_token::token::native_mint::ACCOUNT_DATA.to_vec(), - lamports: sol_to_lamports(1.), + lamports: LAMPORTS_PER_SOL, executable: false, rent_epoch: 1, }); diff --git a/runtime/src/inflation_rewards/mod.rs b/runtime/src/inflation_rewards/mod.rs index 348b7cf2fbaf35..ff6bb9bcf8f828 100644 --- a/runtime/src/inflation_rewards/mod.rs +++ b/runtime/src/inflation_rewards/mod.rs @@ -7,9 +7,8 @@ use { }, solana_clock::Epoch, solana_instruction::error::InstructionError, - solana_stake_interface::error::StakeError, + solana_stake_interface::{error::StakeError, stake_history::StakeHistory}, solana_stake_program::stake_state::{Stake, StakeStateV2}, - solana_sysvar::stake_history::StakeHistory, solana_vote::vote_state_view::VoteStateView, }; @@ -22,17 +21,20 @@ struct CalculatedStakeRewards { new_credits_observed: u64, } -// utility function -// returns a tuple of (stakers_reward,voters_reward) +/// Redeems rewards for the given epoch, stake state and vote state. 
+/// Returns a tuple of: +/// * Stakers reward +/// * Voters reward +/// * Updated stake information pub fn redeem_rewards( rewarded_epoch: Epoch, - stake_state: &mut StakeStateV2, + stake_state: &StakeStateV2, vote_state: &VoteStateView, point_value: &PointValue, stake_history: &StakeHistory, inflation_point_calc_tracer: Option, new_rate_activation_epoch: Option, -) -> Result<(u64, u64), InstructionError> { +) -> Result<(u64, u64, Stake), InstructionError> { if let StakeStateV2::Stake(meta, stake, _stake_flags) = stake_state { if let Some(inflation_point_calc_tracer) = inflation_point_calc_tracer.as_ref() { inflation_point_calc_tracer( @@ -50,16 +52,17 @@ pub fn redeem_rewards( )); } + let mut stake = *stake; if let Some((stakers_reward, voters_reward)) = redeem_stake_rewards( rewarded_epoch, - stake, + &mut stake, point_value, vote_state, stake_history, inflation_point_calc_tracer, new_rate_activation_epoch, ) { - Ok((stakers_reward, voters_reward)) + Ok((stakers_reward, voters_reward, stake)) } else { Err(StakeError::NoCreditsToRedeem.into()) } @@ -255,7 +258,7 @@ fn commission_split(commission: u8, on: u64) -> (u64, u64, bool) { #[cfg(test)] mod tests { use { - self::points::null_tracer, super::*, solana_native_token::sol_to_lamports, + self::points::null_tracer, super::*, solana_native_token::LAMPORTS_PER_SOL, solana_pubkey::Pubkey, solana_stake_interface::state::Delegation, solana_vote_program::vote_state::VoteStateV3, test_case::test_case, }; @@ -676,7 +679,7 @@ mod tests { // bootstrap means fully-vested stake at epoch 0 with // 10_000_000 SOL is a big but not unreasaonable stake let stake = new_stake( - sol_to_lamports(10_000_000f64), + 10_000_000 * LAMPORTS_PER_SOL, &Pubkey::default(), &vote_state, u64::MAX, diff --git a/runtime/src/inflation_rewards/points.rs b/runtime/src/inflation_rewards/points.rs index 05a03b0ffd341d..d4bb93f122f157 100644 --- a/runtime/src/inflation_rewards/points.rs +++ b/runtime/src/inflation_rewards/points.rs @@ -4,8 +4,10 @@ use 
{ solana_clock::Epoch, solana_instruction::error::InstructionError, solana_pubkey::Pubkey, - solana_stake_program::stake_state::{Delegation, Stake, StakeStateV2}, - solana_sysvar::stake_history::StakeHistory, + solana_stake_interface::{ + stake_history::StakeHistory, + state::{Delegation, Stake, StakeStateV2}, + }, solana_vote::vote_state_view::VoteStateView, std::cmp::Ordering, }; @@ -207,7 +209,7 @@ pub(crate) fn calculate_stake_points_and_credits( #[cfg(test)] mod tests { use { - super::*, solana_native_token::sol_to_lamports, + super::*, solana_native_token::LAMPORTS_PER_SOL, solana_vote_program::vote_state::VoteStateV3, }; @@ -230,7 +232,7 @@ mod tests { // bootstrap means fully-vested stake at epoch 0 with // 10_000_000 SOL is a big but not unreasonable stake let stake = new_stake( - sol_to_lamports(10_000_000f64), + 10_000_000 * LAMPORTS_PER_SOL, &Pubkey::default(), &vote_state, u64::MAX, diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index 9977a6baed2fc1..a596cc2bf51c3a 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -27,7 +27,7 @@ use { solana_clock::Slot, solana_hash::Hash, solana_runtime_transaction::runtime_transaction::RuntimeTransaction, - solana_timings::ExecuteTimings, + solana_svm_timings::ExecuteTimings, solana_transaction::sanitized::SanitizedTransaction, solana_transaction_error::{TransactionError, TransactionResult as Result}, solana_unified_scheduler_logic::SchedulingMode, @@ -614,11 +614,12 @@ impl BankWithSchedulerInner { // unconditional context construction for verification is okay here. 
let context = SchedulingContext::for_verification(self.bank.clone()); let mut scheduler = self.scheduler.write().unwrap(); - trace!("with_active_scheduler: {:?}", scheduler); + trace!("with_active_scheduler: {scheduler:?}"); scheduler.transition_from_stale_to_active(|pool, result_with_timings| { let scheduler = pool.take_resumed_scheduler(context, result_with_timings); info!( - "with_active_scheduler: bank (slot: {}) got active, taking scheduler (id: {})", + "with_active_scheduler: bank (slot: {}) got active, taking scheduler (id: \ + {})", self.bank.slot(), scheduler.id(), ); @@ -674,7 +675,7 @@ impl BankWithSchedulerInner { ); (pool, result_with_timings) }); - trace!("timeout_listener: {:?}", scheduler); + trace!("timeout_listener: {scheduler:?}"); }) } @@ -738,17 +739,15 @@ impl BankWithSchedulerInner { SchedulerStatus::Unavailable => (true, None), }; debug!( - "wait_for_scheduler_termination(slot: {}, reason: {:?}): noop: {:?}, result: {:?} at {:?}...", + "wait_for_scheduler_termination(slot: {}, reason: {:?}): noop: {:?}, result: {:?} at \ + {:?}...", bank.slot(), reason, was_noop, result_with_timings.as_ref().map(|(result, _)| result), thread::current(), ); - trace!( - "wait_for_scheduler_termination(result_with_timings: {:?})", - result_with_timings, - ); + trace!("wait_for_scheduler_termination(result_with_timings: {result_with_timings:?})",); result_with_timings } @@ -756,7 +755,8 @@ impl BankWithSchedulerInner { fn drop_scheduler(&self) { if thread::panicking() { error!( - "BankWithSchedulerInner::drop_scheduler(): slot: {} skipping due to already panicking...", + "BankWithSchedulerInner::drop_scheduler(): slot: {} skipping due to already \ + panicking...", self.bank.slot(), ); return; @@ -768,7 +768,8 @@ impl BankWithSchedulerInner { .map(|(result, _timings)| result) { warn!( - "BankWithSchedulerInner::drop_scheduler(): slot: {} discarding error from scheduler: {:?}", + "BankWithSchedulerInner::drop_scheduler(): slot: {} discarding error from \ + 
scheduler: {:?}", self.bank.slot(), err, ); diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index fdf00e6d6d1901..2060c6e357322c 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -18,6 +18,7 @@ pub mod loader_utils; pub mod non_circulating_supply; pub mod prioritization_fee; pub mod prioritization_fee_cache; +pub mod rent_collector; pub mod runtime_config; pub mod serde_snapshot; pub mod snapshot_archive_info; diff --git a/runtime/src/non_circulating_supply.rs b/runtime/src/non_circulating_supply.rs index b931dc51fa0ffb..c6c71ca4d48a6e 100644 --- a/runtime/src/non_circulating_supply.rs +++ b/runtime/src/non_circulating_supply.rs @@ -218,8 +218,9 @@ mod tests { super::*, crate::genesis_utils::genesis_sysvar_and_builtin_program_lamports, solana_account::{Account, AccountSharedData}, + solana_cluster_type::ClusterType, solana_epoch_schedule::EpochSchedule, - solana_genesis_config::{ClusterType, GenesisConfig}, + solana_genesis_config::GenesisConfig, solana_stake_interface::state::{Authorized, Lockup, Meta}, std::{collections::BTreeMap, sync::Arc}, }; diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index 6d63e040a5c122..790ed0d8a2a387 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -266,10 +266,7 @@ impl PrioritizationFeeCache { writable_accounts, }) .unwrap_or_else(|err| { - warn!( - "prioritization fee cache transaction updates failed: {:?}", - err - ); + warn!("prioritization fee cache transaction updates failed: {err:?}"); }); } }); @@ -284,10 +281,7 @@ impl PrioritizationFeeCache { self.sender .send(CacheServiceUpdate::BankFinalized { slot, bank_id }) .unwrap_or_else(|err| { - warn!( - "prioritization fee cache signalling bank frozen failed: {:?}", - err - ) + warn!("prioritization fee cache signalling bank frozen failed: {err:?}") }); } @@ -345,15 +339,15 @@ impl PrioritizationFeeCache { // It should be rare that optimistically confirmed bank had no 
prioritized // transactions, but duplicated and unconfirmed bank had. if pre_purge_bank_count > 0 && post_purge_bank_count == 0 { - warn!("Finalized bank has empty prioritization fee cache. slot {slot} bank id {bank_id}"); + warn!( + "Finalized bank has empty prioritization fee cache. slot {slot} bank id \ + {bank_id}" + ); } if let Some(prioritization_fee) = &mut prioritization_fee { if let Err(err) = prioritization_fee.mark_block_completed() { - error!( - "Unsuccessful finalizing slot {slot}, bank ID {bank_id}: {:?}", - err - ); + error!("Unsuccessful finalizing slot {slot}, bank ID {bank_id}: {err:?}"); } prioritization_fee.report_metrics(slot); } diff --git a/runtime/src/rent_collector.rs b/runtime/src/rent_collector.rs new file mode 100644 index 00000000000000..7f96af39b120f8 --- /dev/null +++ b/runtime/src/rent_collector.rs @@ -0,0 +1,48 @@ +use { + solana_clock::Epoch, solana_epoch_schedule::EpochSchedule, + solana_genesis_config::GenesisConfig, solana_rent::Rent, +}; + +#[cfg_attr(feature = "frozen-abi", derive(solana_frozen_abi_macro::AbiExample))] +#[derive(Clone, Debug, PartialEq, serde_derive::Deserialize, serde_derive::Serialize)] +pub struct RentCollector { + pub epoch: Epoch, + pub epoch_schedule: EpochSchedule, + pub slots_per_year: f64, + pub rent: Rent, +} + +impl Default for RentCollector { + fn default() -> Self { + Self { + epoch: Epoch::default(), + epoch_schedule: EpochSchedule::default(), + // derive default value using GenesisConfig::default() + slots_per_year: GenesisConfig::default().slots_per_year(), + rent: Rent::default(), + } + } +} + +impl RentCollector { + pub(crate) fn new( + epoch: Epoch, + epoch_schedule: EpochSchedule, + slots_per_year: f64, + rent: Rent, + ) -> Self { + Self { + epoch, + epoch_schedule, + slots_per_year, + rent, + } + } + + pub(crate) fn clone_with_epoch(&self, epoch: Epoch) -> Self { + Self { + epoch, + ..self.clone() + } + } +} diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 
7de4df5e637d1e..8ae2ceacd4763f 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -4,6 +4,7 @@ use { crate::{ bank::{Bank, BankFieldsToDeserialize, BankFieldsToSerialize, BankHashStats, BankRc}, epoch_stakes::VersionedEpochStakes, + rent_collector::RentCollector, runtime_config::RuntimeConfig, snapshot_utils::{SnapshotError, StorageAndNextAccountsFileId}, stake_account::StakeAccount, @@ -16,7 +17,7 @@ use { accounts::Accounts, accounts_db::{ AccountStorageEntry, AccountsDb, AccountsDbConfig, AccountsFileId, - AtomicAccountsFileId, DuplicatesLtHash, IndexGenerationInfo, + AtomicAccountsFileId, IndexGenerationInfo, }, accounts_file::{AccountsFile, StorageAccess}, accounts_hash::AccountsLtHash, @@ -24,7 +25,6 @@ use { ancestors::AncestorsForSerialization, blockhash_queue::BlockhashQueue, }, - solana_builtins::prototype::BuiltinPrototype, solana_clock::{Epoch, Slot, UnixTimestamp}, solana_epoch_schedule::EpochSchedule, solana_fee_calculator::{FeeCalculator, FeeRateGovernor}, @@ -35,7 +35,6 @@ use { solana_lattice_hash::lt_hash::LtHash, solana_measure::measure::Measure, solana_pubkey::Pubkey, - solana_rent_collector::RentCollector, solana_serde::default_on_eof, solana_stake_interface::state::Delegation, std::{ @@ -48,19 +47,22 @@ use { atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, }, - thread::Builder, time::Instant, }, storage::SerializableStorage, types::SerdeAccountsLtHash, }; +mod status_cache; mod storage; mod tests; mod types; mod utils; -pub(crate) use storage::{SerializableAccountStorageEntry, SerializedAccountsFileId}; +pub(crate) use { + status_cache::{deserialize_status_cache, serialize_status_cache}, + storage::{SerializableAccountStorageEntry, SerializedAccountsFileId}, +}; const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; @@ -340,8 +342,14 @@ impl SnapshotAccountsDbFields { // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot incremental_snapshot_storages 
.iter() - .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| { - io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!") + .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)) + .then_some(()) + .ok_or_else(|| { + io::Error::new( + io::ErrorKind::InvalidData, + "Snapshots are incompatible: There are storages for the same slot in \ + both the full snapshot and the incremental snapshot!", + ) })?; let mut combined_storages = full_snapshot_storages; @@ -435,9 +443,7 @@ where let deserializable_bank = deserialize_from::<_, DeserializableVersionedBank>(&mut stream)?; if !deserializable_bank.unused_epoch_stakes.is_empty() { return Err(Box::new(bincode::ErrorKind::Custom( - "Expected deserialized bank's unused_epoch_stakes field \ - to be empty" - .to_string(), + "Expected deserialized bank's unused_epoch_stakes field to be empty".to_string(), ))); } let mut bank_fields = BankFieldsToDeserialize::from(deserializable_bank); @@ -447,7 +453,7 @@ where // Process extra fields let ExtraFieldsToDeserialize { lamports_per_signature, - _obsolete_incremental_snapshot_persistence: _incremental_snapshot_persistence, + _obsolete_incremental_snapshot_persistence, _obsolete_epoch_accounts_hash, versioned_epoch_stakes, accounts_lt_hash, @@ -464,24 +470,6 @@ where Ok((bank_fields, accounts_db_fields)) } -/// used by tests to compare contents of serialized bank fields -/// serialized format is not deterministic - likely due to randomness in structs like hashmaps -#[cfg(feature = "dev-context-only-utils")] -pub(crate) fn compare_two_serialized_banks( - path1: impl AsRef, - path2: impl AsRef, -) -> std::result::Result { - use std::fs::File; - let file1 = File::open(path1)?; - let mut stream1 = BufReader::new(file1); - let file2 = File::open(path2)?; - let mut stream2 = BufReader::new(file2); - - let fields1 
= deserialize_bank_fields(&mut stream1)?; - let fields2 = deserialize_bank_fields(&mut stream2)?; - Ok(fields1 == fields2) -} - /// Get snapshot storage lengths from accounts_db_fields pub(crate) fn snapshot_storage_lengths_from_fields( accounts_db_fields: &AccountsDbFields, @@ -547,7 +535,9 @@ pub(crate) fn fields_from_streams( /// This struct contains side-info while reconstructing the bank from streams #[derive(Debug)] pub struct BankFromStreamsInfo { - pub duplicates_lt_hash: Option>, + /// The accounts lt hash calculated during index generation. + /// Will be used when verifying accounts, after rebuilding a Bank. + pub calculated_accounts_lt_hash: AccountsLtHash, } #[allow(clippy::too_many_arguments)] @@ -559,10 +549,9 @@ pub(crate) fn bank_from_streams( genesis_config: &GenesisConfig, runtime_config: &RuntimeConfig, debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, limit_load_slot_count_from_snapshot: Option, verify_index: bool, - accounts_db_config: Option, + accounts_db_config: AccountsDbConfig, accounts_update_notifier: Option, exit: Arc, ) -> std::result::Result<(Bank, BankFromStreamsInfo), Error> @@ -578,7 +567,6 @@ where account_paths, storage_and_next_append_vec_id, debug_keys, - additional_builtins, limit_load_slot_count_from_snapshot, verify_index, accounts_db_config, @@ -588,7 +576,7 @@ where Ok(( bank, BankFromStreamsInfo { - duplicates_lt_hash: info.duplicates_lt_hash, + calculated_accounts_lt_hash: info.calculated_accounts_lt_hash, }, )) } @@ -803,7 +791,9 @@ impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccount /// This struct contains side-info while reconstructing the bank from fields #[derive(Debug)] pub(crate) struct ReconstructedBankInfo { - pub(crate) duplicates_lt_hash: Option>, + /// The accounts lt hash calculated during index generation. + /// Will be used when verifying accounts, after rebuilding a Bank. 
+ pub(crate) calculated_accounts_lt_hash: AccountsLtHash, } #[allow(clippy::too_many_arguments)] @@ -815,10 +805,9 @@ pub(crate) fn reconstruct_bank_from_fields( account_paths: &[PathBuf], storage_and_next_append_vec_id: StorageAndNextAccountsFileId, debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, limit_load_slot_count_from_snapshot: Option, verify_index: bool, - accounts_db_config: Option, + accounts_db_config: AccountsDbConfig, accounts_update_notifier: Option, exit: Arc, ) -> Result<(Bank, ReconstructedBankInfo), Error> @@ -841,16 +830,12 @@ where let bank_rc = BankRc::new(Accounts::new(Arc::new(accounts_db))); let runtime_config = Arc::new(runtime_config.clone()); - // if limit_load_slot_count_from_snapshot is set, then we need to side-step some correctness checks beneath this call - let debug_do_not_add_builtins = limit_load_slot_count_from_snapshot.is_some(); - let bank = Bank::new_from_fields( + let bank = Bank::new_from_snapshot( bank_rc, genesis_config, runtime_config, bank_fields, debug_keys, - additional_builtins, - debug_do_not_add_builtins, reconstructed_accounts_db_info.accounts_data_len, ); @@ -858,7 +843,7 @@ where Ok(( bank, ReconstructedBankInfo { - duplicates_lt_hash: reconstructed_accounts_db_info.duplicates_lt_hash, + calculated_accounts_lt_hash: reconstructed_accounts_db_info.calculated_accounts_lt_hash, }, )) } @@ -979,10 +964,12 @@ pub(crate) fn remap_and_reconstruct_single_storage( } /// This struct contains side-info while reconstructing the accounts DB from fields. -#[derive(Debug, Default, Clone)] +#[derive(Debug)] pub struct ReconstructedAccountsDbInfo { pub accounts_data_len: u64, - pub duplicates_lt_hash: Option>, + /// The accounts lt hash calculated during index generation. + /// Will be used when verifying accounts, after rebuilding a Bank. 
+ pub calculated_accounts_lt_hash: AccountsLtHash, pub bank_hash_stats: BankHashStats, } @@ -993,7 +980,7 @@ fn reconstruct_accountsdb_from_fields( storage_and_next_append_vec_id: StorageAndNextAccountsFileId, limit_load_slot_count_from_snapshot: Option, verify_index: bool, - accounts_db_config: Option, + accounts_db_config: AccountsDbConfig, accounts_update_notifier: Option, exit: Arc, ) -> Result<(AccountsDb, ReconstructedAccountsDbInfo), Error> @@ -1048,38 +1035,19 @@ where .write_version .fetch_add(snapshot_version, Ordering::Release); - let mut measure_notify = Measure::start("accounts_notify"); - - let accounts_db = Arc::new(accounts_db); - let accounts_db_clone = accounts_db.clone(); - let handle = Builder::new() - .name("solNfyAccRestor".to_string()) - .spawn(move || { - accounts_db_clone.notify_account_restore_from_snapshot(); - }) - .unwrap(); - info!("Building accounts index..."); let start = Instant::now(); let IndexGenerationInfo { accounts_data_len, - duplicates_lt_hash, + calculated_accounts_lt_hash, } = accounts_db.generate_index(limit_load_slot_count_from_snapshot, verify_index); info!("Building accounts index... Done in {:?}", start.elapsed()); - handle.join().unwrap(); - measure_notify.stop(); - - datapoint_info!( - "reconstruct_accountsdb_from_fields()", - ("accountsdb-notify-at-start-us", measure_notify.as_us(), i64), - ); - Ok(( - Arc::try_unwrap(accounts_db).unwrap(), + accounts_db, ReconstructedAccountsDbInfo { accounts_data_len, - duplicates_lt_hash, + calculated_accounts_lt_hash, bank_hash_stats: snapshot_bank_hash_info.stats, }, )) diff --git a/runtime/src/serde_snapshot/status_cache.rs b/runtime/src/serde_snapshot/status_cache.rs new file mode 100644 index 00000000000000..ec6fca95b89545 --- /dev/null +++ b/runtime/src/serde_snapshot/status_cache.rs @@ -0,0 +1,494 @@ +//! 
Serialize and deserialize the status cache for snapshots + +use { + crate::{bank::BankSlotDelta, snapshot_utils, status_cache::KeySlice}, + bincode::{self, Options as _}, + solana_clock::Slot, + solana_hash::Hash, + solana_instruction::error::InstructionError, + solana_transaction_error::TransactionError, + std::{ + collections::HashMap, + path::Path, + sync::{Arc, Mutex}, + }, +}; + +#[cfg_attr( + feature = "frozen-abi", + frozen_abi(digest = "AardUUq1At4qq6oNNp9V2JZFsMR5k54RZmBmZkxUfk7m") +)] +type SerdeBankSlotDelta = SerdeSlotDelta>; +type SerdeSlotDelta = (Slot, bool, SerdeStatus); +type SerdeStatus = ahash::HashMap)>; + +/// Serializes the status cache's `slot_deltas` to file at `status_cache_path` +/// +/// This fn serializes the status cache into the binary format required by snapshots. +pub fn serialize_status_cache( + slot_deltas: &[BankSlotDelta], + status_cache_path: &Path, +) -> snapshot_utils::Result { + snapshot_utils::serialize_snapshot_data_file(status_cache_path, |stream| { + let snapshot_slot_deltas = slot_deltas + .iter() + .map(|slot_delta| { + let status_map = slot_delta.2.lock().unwrap(); + let snapshot_status_map = status_map + .iter() + .map(|(key, value)| { + ( + *key, + ( + value.0, + value + .1 + .iter() + .map(|(key_slice, result)| { + ( + *key_slice, + result.clone().map_err(SerdeTransactionError::from), + ) + }) + .collect::>(), + ), + ) + }) + .collect::>(); + (slot_delta.0, slot_delta.1, snapshot_status_map) + }) + .collect::>(); + bincode::serialize_into(stream, &snapshot_slot_deltas)?; + Ok(()) + }) +} + +/// Deserializes the status cache from file at `status_cache_path` +/// +/// This fn deserializes the status cache from a snapshot.
+pub fn deserialize_status_cache( + status_cache_path: &Path, +) -> snapshot_utils::Result> { + snapshot_utils::deserialize_snapshot_data_file(status_cache_path, |stream| { + let snapshot_slot_deltas: Vec = bincode::options() + .with_limit(snapshot_utils::MAX_SNAPSHOT_DATA_FILE_SIZE) + .with_fixint_encoding() + .allow_trailing_bytes() + .deserialize_from(stream)?; + + let slot_deltas = snapshot_slot_deltas + .iter() + .map(|slot_delta| { + let status_map = slot_delta + .2 + .iter() + .map(|(key, value)| { + ( + *key, + ( + value.0, + value + .1 + .iter() + .map(|(key_slice, result)| { + (*key_slice, result.clone().map_err(TransactionError::from)) + }) + .collect::>(), + ), + ) + }) + .collect::>(); + (slot_delta.0, slot_delta.1, Arc::new(Mutex::new(status_map))) + }) + .collect::>(); + Ok(slot_deltas) + }) +} + +/// Copy of `TransactionError` that uses a different `InstructionError` type to +/// contain a string in the BorshIoError variant. +#[cfg_attr( + feature = "frozen-abi", + frozen_abi(digest = "5pMgydVNgsYbg64Trhjxbftsug5La7fRDmooyrsHd4wy"), + derive(AbiExample, AbiEnumVisitor) +)] +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +enum SerdeTransactionError { + AccountInUse, + AccountLoadedTwice, + AccountNotFound, + ProgramAccountNotFound, + InsufficientFundsForFee, + InvalidAccountForFee, + AlreadyProcessed, + BlockhashNotFound, + InstructionError(u8, SerdeInstructionError), + CallChainTooDeep, + MissingSignatureForFee, + InvalidAccountIndex, + SignatureFailure, + InvalidProgramForExecution, + SanitizeFailure, + ClusterMaintenance, + AccountBorrowOutstanding, + WouldExceedMaxBlockCostLimit, + UnsupportedVersion, + InvalidWritableAccount, + WouldExceedMaxAccountCostLimit, + WouldExceedAccountDataBlockLimit, + TooManyAccountLocks, + AddressLookupTableNotFound, + InvalidAddressLookupTableOwner, + InvalidAddressLookupTableData, + InvalidAddressLookupTableIndex, + InvalidRentPayingAccount, + WouldExceedMaxVoteCostLimit, + 
WouldExceedAccountDataTotalLimit, + DuplicateInstruction(u8), + InsufficientFundsForRent { account_index: u8 }, + MaxLoadedAccountsDataSizeExceeded, + InvalidLoadedAccountsDataSizeLimit, + ResanitizationNeeded, + ProgramExecutionTemporarilyRestricted { account_index: u8 }, + UnbalancedTransaction, + ProgramCacheHitMaxLimit, + CommitCancelled, +} + +impl From for SerdeTransactionError { + fn from(err: TransactionError) -> Self { + match err { + TransactionError::AccountInUse => Self::AccountInUse, + TransactionError::AccountLoadedTwice => Self::AccountLoadedTwice, + TransactionError::AccountNotFound => Self::AccountNotFound, + TransactionError::ProgramAccountNotFound => Self::ProgramAccountNotFound, + TransactionError::InsufficientFundsForFee => Self::InsufficientFundsForFee, + TransactionError::InvalidAccountForFee => Self::InvalidAccountForFee, + TransactionError::AlreadyProcessed => Self::AlreadyProcessed, + TransactionError::BlockhashNotFound => Self::BlockhashNotFound, + TransactionError::InstructionError(i, inner) => Self::InstructionError(i, inner.into()), + TransactionError::CallChainTooDeep => Self::CallChainTooDeep, + TransactionError::MissingSignatureForFee => Self::MissingSignatureForFee, + TransactionError::InvalidAccountIndex => Self::InvalidAccountIndex, + TransactionError::SignatureFailure => Self::SignatureFailure, + TransactionError::InvalidProgramForExecution => Self::InvalidProgramForExecution, + TransactionError::SanitizeFailure => Self::SanitizeFailure, + TransactionError::ClusterMaintenance => Self::ClusterMaintenance, + TransactionError::AccountBorrowOutstanding => Self::AccountBorrowOutstanding, + TransactionError::WouldExceedMaxBlockCostLimit => Self::WouldExceedMaxBlockCostLimit, + TransactionError::UnsupportedVersion => Self::UnsupportedVersion, + TransactionError::InvalidWritableAccount => Self::InvalidWritableAccount, + TransactionError::WouldExceedMaxAccountCostLimit => { + Self::WouldExceedMaxAccountCostLimit + } + 
TransactionError::WouldExceedAccountDataBlockLimit => { + Self::WouldExceedAccountDataBlockLimit + } + TransactionError::TooManyAccountLocks => Self::TooManyAccountLocks, + TransactionError::AddressLookupTableNotFound => Self::AddressLookupTableNotFound, + TransactionError::InvalidAddressLookupTableOwner => { + Self::InvalidAddressLookupTableOwner + } + TransactionError::InvalidAddressLookupTableData => Self::InvalidAddressLookupTableData, + TransactionError::InvalidAddressLookupTableIndex => { + Self::InvalidAddressLookupTableIndex + } + TransactionError::InvalidRentPayingAccount => Self::InvalidRentPayingAccount, + TransactionError::WouldExceedMaxVoteCostLimit => Self::WouldExceedMaxVoteCostLimit, + TransactionError::WouldExceedAccountDataTotalLimit => { + Self::WouldExceedAccountDataTotalLimit + } + TransactionError::DuplicateInstruction(i) => Self::DuplicateInstruction(i), + TransactionError::InsufficientFundsForRent { account_index } => { + Self::InsufficientFundsForRent { account_index } + } + TransactionError::MaxLoadedAccountsDataSizeExceeded => { + Self::MaxLoadedAccountsDataSizeExceeded + } + TransactionError::InvalidLoadedAccountsDataSizeLimit => { + Self::InvalidLoadedAccountsDataSizeLimit + } + TransactionError::ResanitizationNeeded => Self::ResanitizationNeeded, + TransactionError::ProgramExecutionTemporarilyRestricted { account_index } => { + Self::ProgramExecutionTemporarilyRestricted { account_index } + } + TransactionError::UnbalancedTransaction => Self::UnbalancedTransaction, + TransactionError::ProgramCacheHitMaxLimit => Self::ProgramCacheHitMaxLimit, + TransactionError::CommitCancelled => Self::CommitCancelled, + } + } +} + +impl From for TransactionError { + fn from(err: SerdeTransactionError) -> Self { + match err { + SerdeTransactionError::AccountInUse => Self::AccountInUse, + SerdeTransactionError::AccountLoadedTwice => Self::AccountLoadedTwice, + SerdeTransactionError::AccountNotFound => Self::AccountNotFound, + 
SerdeTransactionError::ProgramAccountNotFound => Self::ProgramAccountNotFound, + SerdeTransactionError::InsufficientFundsForFee => Self::InsufficientFundsForFee, + SerdeTransactionError::InvalidAccountForFee => Self::InvalidAccountForFee, + SerdeTransactionError::AlreadyProcessed => Self::AlreadyProcessed, + SerdeTransactionError::BlockhashNotFound => Self::BlockhashNotFound, + SerdeTransactionError::InstructionError(i, inner) => { + Self::InstructionError(i, inner.into()) + } + SerdeTransactionError::CallChainTooDeep => Self::CallChainTooDeep, + SerdeTransactionError::MissingSignatureForFee => Self::MissingSignatureForFee, + SerdeTransactionError::InvalidAccountIndex => Self::InvalidAccountIndex, + SerdeTransactionError::SignatureFailure => Self::SignatureFailure, + SerdeTransactionError::InvalidProgramForExecution => Self::InvalidProgramForExecution, + SerdeTransactionError::SanitizeFailure => Self::SanitizeFailure, + SerdeTransactionError::ClusterMaintenance => Self::ClusterMaintenance, + SerdeTransactionError::AccountBorrowOutstanding => Self::AccountBorrowOutstanding, + SerdeTransactionError::WouldExceedMaxBlockCostLimit => { + Self::WouldExceedMaxBlockCostLimit + } + SerdeTransactionError::UnsupportedVersion => Self::UnsupportedVersion, + SerdeTransactionError::InvalidWritableAccount => Self::InvalidWritableAccount, + SerdeTransactionError::WouldExceedMaxAccountCostLimit => { + Self::WouldExceedMaxAccountCostLimit + } + SerdeTransactionError::WouldExceedAccountDataBlockLimit => { + Self::WouldExceedAccountDataBlockLimit + } + SerdeTransactionError::TooManyAccountLocks => Self::TooManyAccountLocks, + SerdeTransactionError::AddressLookupTableNotFound => Self::AddressLookupTableNotFound, + SerdeTransactionError::InvalidAddressLookupTableOwner => { + Self::InvalidAddressLookupTableOwner + } + SerdeTransactionError::InvalidAddressLookupTableData => { + Self::InvalidAddressLookupTableData + } + SerdeTransactionError::InvalidAddressLookupTableIndex => { + 
Self::InvalidAddressLookupTableIndex + } + SerdeTransactionError::InvalidRentPayingAccount => Self::InvalidRentPayingAccount, + SerdeTransactionError::WouldExceedMaxVoteCostLimit => Self::WouldExceedMaxVoteCostLimit, + SerdeTransactionError::WouldExceedAccountDataTotalLimit => { + Self::WouldExceedAccountDataTotalLimit + } + SerdeTransactionError::DuplicateInstruction(i) => Self::DuplicateInstruction(i), + SerdeTransactionError::InsufficientFundsForRent { account_index } => { + Self::InsufficientFundsForRent { account_index } + } + SerdeTransactionError::MaxLoadedAccountsDataSizeExceeded => { + Self::MaxLoadedAccountsDataSizeExceeded + } + SerdeTransactionError::InvalidLoadedAccountsDataSizeLimit => { + Self::InvalidLoadedAccountsDataSizeLimit + } + SerdeTransactionError::ResanitizationNeeded => Self::ResanitizationNeeded, + SerdeTransactionError::ProgramExecutionTemporarilyRestricted { account_index } => { + Self::ProgramExecutionTemporarilyRestricted { account_index } + } + SerdeTransactionError::UnbalancedTransaction => Self::UnbalancedTransaction, + SerdeTransactionError::ProgramCacheHitMaxLimit => Self::ProgramCacheHitMaxLimit, + SerdeTransactionError::CommitCancelled => Self::CommitCancelled, + } + } +} + +/// Copy of `InstructionError` type in which the `BorshIoError` variant +/// contains a string. 
+#[cfg_attr(test, derive(strum_macros::FromRepr, strum_macros::EnumIter))] +#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +enum SerdeInstructionError { + GenericError, + InvalidArgument, + InvalidInstructionData, + InvalidAccountData, + AccountDataTooSmall, + InsufficientFunds, + IncorrectProgramId, + MissingRequiredSignature, + AccountAlreadyInitialized, + UninitializedAccount, + UnbalancedInstruction, + ModifiedProgramId, + ExternalAccountLamportSpend, + ExternalAccountDataModified, + ReadonlyLamportChange, + ReadonlyDataModified, + DuplicateAccountIndex, + ExecutableModified, + RentEpochModified, + NotEnoughAccountKeys, + AccountDataSizeChanged, + AccountNotExecutable, + AccountBorrowFailed, + AccountBorrowOutstanding, + DuplicateAccountOutOfSync, + Custom(u32), + InvalidError, + ExecutableDataModified, + ExecutableLamportChange, + ExecutableAccountNotRentExempt, + UnsupportedProgramId, + CallDepth, + MissingAccount, + ReentrancyNotAllowed, + MaxSeedLengthExceeded, + InvalidSeeds, + InvalidRealloc, + ComputationalBudgetExceeded, + PrivilegeEscalation, + ProgramEnvironmentSetupFailure, + ProgramFailedToComplete, + ProgramFailedToCompile, + Immutable, + IncorrectAuthority, + BorshIoError(String), + AccountNotRentExempt, + InvalidAccountOwner, + ArithmeticOverflow, + UnsupportedSysvar, + IllegalOwner, + MaxAccountsDataAllocationsExceeded, + MaxAccountsExceeded, + MaxInstructionTraceLengthExceeded, + BuiltinProgramsMustConsumeComputeUnits, +} + +impl From for InstructionError { + fn from(err: SerdeInstructionError) -> Self { + match err { + SerdeInstructionError::GenericError => Self::GenericError, + SerdeInstructionError::InvalidArgument => Self::InvalidArgument, + SerdeInstructionError::InvalidInstructionData => Self::InvalidInstructionData, + SerdeInstructionError::InvalidAccountData => Self::InvalidAccountData, + SerdeInstructionError::AccountDataTooSmall => 
Self::AccountDataTooSmall, + SerdeInstructionError::InsufficientFunds => Self::InsufficientFunds, + SerdeInstructionError::IncorrectProgramId => Self::IncorrectProgramId, + SerdeInstructionError::MissingRequiredSignature => Self::MissingRequiredSignature, + SerdeInstructionError::AccountAlreadyInitialized => Self::AccountAlreadyInitialized, + SerdeInstructionError::UninitializedAccount => Self::UninitializedAccount, + SerdeInstructionError::UnbalancedInstruction => Self::UnbalancedInstruction, + SerdeInstructionError::ModifiedProgramId => Self::ModifiedProgramId, + SerdeInstructionError::ExternalAccountLamportSpend => Self::ExternalAccountLamportSpend, + SerdeInstructionError::ExternalAccountDataModified => Self::ExternalAccountDataModified, + SerdeInstructionError::ReadonlyLamportChange => Self::ReadonlyLamportChange, + SerdeInstructionError::ReadonlyDataModified => Self::ReadonlyDataModified, + SerdeInstructionError::DuplicateAccountIndex => Self::DuplicateAccountIndex, + SerdeInstructionError::ExecutableModified => Self::ExecutableModified, + SerdeInstructionError::RentEpochModified => Self::RentEpochModified, + SerdeInstructionError::NotEnoughAccountKeys => Self::NotEnoughAccountKeys, + SerdeInstructionError::AccountDataSizeChanged => Self::AccountDataSizeChanged, + SerdeInstructionError::AccountNotExecutable => Self::AccountNotExecutable, + SerdeInstructionError::AccountBorrowFailed => Self::AccountBorrowFailed, + SerdeInstructionError::AccountBorrowOutstanding => Self::AccountBorrowOutstanding, + SerdeInstructionError::DuplicateAccountOutOfSync => Self::DuplicateAccountOutOfSync, + SerdeInstructionError::Custom(n) => Self::Custom(n), + SerdeInstructionError::InvalidError => Self::InvalidError, + SerdeInstructionError::ExecutableDataModified => Self::ExecutableDataModified, + SerdeInstructionError::ExecutableLamportChange => Self::ExecutableLamportChange, + SerdeInstructionError::ExecutableAccountNotRentExempt => { + Self::ExecutableAccountNotRentExempt + } + 
SerdeInstructionError::UnsupportedProgramId => Self::UnsupportedProgramId, + SerdeInstructionError::CallDepth => Self::CallDepth, + SerdeInstructionError::MissingAccount => Self::MissingAccount, + SerdeInstructionError::ReentrancyNotAllowed => Self::ReentrancyNotAllowed, + SerdeInstructionError::MaxSeedLengthExceeded => Self::MaxSeedLengthExceeded, + SerdeInstructionError::InvalidSeeds => Self::InvalidSeeds, + SerdeInstructionError::InvalidRealloc => Self::InvalidRealloc, + SerdeInstructionError::ComputationalBudgetExceeded => Self::ComputationalBudgetExceeded, + SerdeInstructionError::PrivilegeEscalation => Self::PrivilegeEscalation, + SerdeInstructionError::ProgramEnvironmentSetupFailure => { + Self::ProgramEnvironmentSetupFailure + } + SerdeInstructionError::ProgramFailedToComplete => Self::ProgramFailedToComplete, + SerdeInstructionError::ProgramFailedToCompile => Self::ProgramFailedToCompile, + SerdeInstructionError::Immutable => Self::Immutable, + SerdeInstructionError::IncorrectAuthority => Self::IncorrectAuthority, + SerdeInstructionError::BorshIoError(_) => Self::BorshIoError, + SerdeInstructionError::AccountNotRentExempt => Self::AccountNotRentExempt, + SerdeInstructionError::InvalidAccountOwner => Self::InvalidAccountOwner, + SerdeInstructionError::ArithmeticOverflow => Self::ArithmeticOverflow, + SerdeInstructionError::UnsupportedSysvar => Self::UnsupportedSysvar, + SerdeInstructionError::IllegalOwner => Self::IllegalOwner, + SerdeInstructionError::MaxAccountsDataAllocationsExceeded => { + Self::MaxAccountsDataAllocationsExceeded + } + SerdeInstructionError::MaxAccountsExceeded => Self::MaxAccountsExceeded, + SerdeInstructionError::MaxInstructionTraceLengthExceeded => { + Self::MaxInstructionTraceLengthExceeded + } + SerdeInstructionError::BuiltinProgramsMustConsumeComputeUnits => { + Self::BuiltinProgramsMustConsumeComputeUnits + } + } + } +} + +impl From for SerdeInstructionError { + fn from(err: InstructionError) -> Self { + match err { + 
InstructionError::GenericError => Self::GenericError, + InstructionError::InvalidArgument => Self::InvalidArgument, + InstructionError::InvalidInstructionData => Self::InvalidInstructionData, + InstructionError::InvalidAccountData => Self::InvalidAccountData, + InstructionError::AccountDataTooSmall => Self::AccountDataTooSmall, + InstructionError::InsufficientFunds => Self::InsufficientFunds, + InstructionError::IncorrectProgramId => Self::IncorrectProgramId, + InstructionError::MissingRequiredSignature => Self::MissingRequiredSignature, + InstructionError::AccountAlreadyInitialized => Self::AccountAlreadyInitialized, + InstructionError::UninitializedAccount => Self::UninitializedAccount, + InstructionError::UnbalancedInstruction => Self::UnbalancedInstruction, + InstructionError::ModifiedProgramId => Self::ModifiedProgramId, + InstructionError::ExternalAccountLamportSpend => Self::ExternalAccountLamportSpend, + InstructionError::ExternalAccountDataModified => Self::ExternalAccountDataModified, + InstructionError::ReadonlyLamportChange => Self::ReadonlyLamportChange, + InstructionError::ReadonlyDataModified => Self::ReadonlyDataModified, + InstructionError::DuplicateAccountIndex => Self::DuplicateAccountIndex, + InstructionError::ExecutableModified => Self::ExecutableModified, + InstructionError::RentEpochModified => Self::RentEpochModified, + InstructionError::NotEnoughAccountKeys => Self::NotEnoughAccountKeys, + InstructionError::AccountDataSizeChanged => Self::AccountDataSizeChanged, + InstructionError::AccountNotExecutable => Self::AccountNotExecutable, + InstructionError::AccountBorrowFailed => Self::AccountBorrowFailed, + InstructionError::AccountBorrowOutstanding => Self::AccountBorrowOutstanding, + InstructionError::DuplicateAccountOutOfSync => Self::DuplicateAccountOutOfSync, + InstructionError::Custom(n) => Self::Custom(n), + InstructionError::InvalidError => Self::InvalidError, + InstructionError::ExecutableDataModified => Self::ExecutableDataModified, + 
InstructionError::ExecutableLamportChange => Self::ExecutableLamportChange, + InstructionError::ExecutableAccountNotRentExempt => { + Self::ExecutableAccountNotRentExempt + } + InstructionError::UnsupportedProgramId => Self::UnsupportedProgramId, + InstructionError::CallDepth => Self::CallDepth, + InstructionError::MissingAccount => Self::MissingAccount, + InstructionError::ReentrancyNotAllowed => Self::ReentrancyNotAllowed, + InstructionError::MaxSeedLengthExceeded => Self::MaxSeedLengthExceeded, + InstructionError::InvalidSeeds => Self::InvalidSeeds, + InstructionError::InvalidRealloc => Self::InvalidRealloc, + InstructionError::ComputationalBudgetExceeded => Self::ComputationalBudgetExceeded, + InstructionError::PrivilegeEscalation => Self::PrivilegeEscalation, + InstructionError::ProgramEnvironmentSetupFailure => { + Self::ProgramEnvironmentSetupFailure + } + InstructionError::ProgramFailedToComplete => Self::ProgramFailedToComplete, + InstructionError::ProgramFailedToCompile => Self::ProgramFailedToCompile, + InstructionError::Immutable => Self::Immutable, + InstructionError::IncorrectAuthority => Self::IncorrectAuthority, + InstructionError::BorshIoError => Self::BorshIoError(String::new()), + InstructionError::AccountNotRentExempt => Self::AccountNotRentExempt, + InstructionError::InvalidAccountOwner => Self::InvalidAccountOwner, + InstructionError::ArithmeticOverflow => Self::ArithmeticOverflow, + InstructionError::UnsupportedSysvar => Self::UnsupportedSysvar, + InstructionError::IllegalOwner => Self::IllegalOwner, + InstructionError::MaxAccountsDataAllocationsExceeded => { + Self::MaxAccountsDataAllocationsExceeded + } + InstructionError::MaxAccountsExceeded => Self::MaxAccountsExceeded, + InstructionError::MaxInstructionTraceLengthExceeded => { + Self::MaxInstructionTraceLengthExceeded + } + InstructionError::BuiltinProgramsMustConsumeComputeUnits => { + Self::BuiltinProgramsMustConsumeComputeUnits + } + } + } +} diff --git 
a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index d71aaada0f3070..8fa44d10a51e86 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -18,8 +18,8 @@ mod serde_snapshot_tests { account_storage_reader::AccountStorageReader, accounts::Accounts, accounts_db::{ - get_temp_accounts_paths, test_utils::create_test_accounts, AccountStorageEntry, - AccountsDb, AtomicAccountsFileId, + get_temp_accounts_paths, AccountStorageEntry, AccountsDb, AccountsDbConfig, + AtomicAccountsFileId, MarkObsoleteAccounts, ACCOUNTS_DB_CONFIG_FOR_TESTING, }, accounts_file::{AccountsFile, AccountsFileError, StorageAccess}, ancestors::Ancestors, @@ -39,7 +39,7 @@ mod serde_snapshot_tests { }, }, tempfile::TempDir, - test_case::test_case, + test_case::{test_case, test_matrix}, }; fn linear_ancestors(end_slot: u64) -> Ancestors { @@ -54,6 +54,7 @@ mod serde_snapshot_tests { stream: &mut BufReader, account_paths: &[PathBuf], storage_and_next_append_vec_id: StorageAndNextAccountsFileId, + accounts_db_config: AccountsDbConfig, ) -> Result where R: Read, @@ -70,7 +71,7 @@ mod serde_snapshot_tests { storage_and_next_append_vec_id, None, false, - Some(solana_accounts_db::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), + accounts_db_config, None, Arc::default(), ) @@ -81,11 +82,17 @@ mod serde_snapshot_tests { stream: &mut BufReader, account_paths: &[PathBuf], storage_and_next_append_vec_id: StorageAndNextAccountsFileId, + accounts_db_config: AccountsDbConfig, ) -> Result where R: Read, { - context_accountsdb_from_stream::(stream, account_paths, storage_and_next_append_vec_id) + context_accountsdb_from_stream::( + stream, + account_paths, + storage_and_next_append_vec_id, + accounts_db_config, + ) } fn accountsdb_to_stream( @@ -152,6 +159,7 @@ mod serde_snapshot_tests { accounts: &AccountsDb, slot: Slot, storage_access: StorageAccess, + accounts_db_config: AccountsDbConfig, ) -> AccountsDb { let mut writer = Cursor::new(vec![]); let 
snapshot_storages = accounts.get_storages(..=slot).0; @@ -170,8 +178,13 @@ mod serde_snapshot_tests { // Simulate obtaining a copy of the AppendVecs from a tarball let storage_and_next_append_vec_id = copy_append_vecs(accounts, copied_accounts.path(), storage_access).unwrap(); - let mut accounts_db = - accountsdb_from_stream(&mut reader, &[], storage_and_next_append_vec_id).unwrap(); + let mut accounts_db = accountsdb_from_stream( + &mut reader, + &[], + storage_and_next_append_vec_id, + accounts_db_config, + ) + .unwrap(); // The append vecs will be used from `copied_accounts` directly by the new AccountsDb so keep // its TempDir alive @@ -205,8 +218,13 @@ mod serde_snapshot_tests { let accounts = Accounts::new(Arc::new(accounts_db)); let slot = 0; - let mut pubkeys: Vec = vec![]; - create_test_accounts(&accounts, &mut pubkeys, 100, slot); + let pubkeys: Vec<_> = std::iter::repeat_with(solana_pubkey::new_rand) + .take(100) + .collect(); + for (i, pubkey) in pubkeys.iter().enumerate() { + let account = AccountSharedData::new(i as u64 + 1, 0, &Pubkey::default()); + accounts.store_accounts_seq((slot, [(pubkey, &account)].as_slice()), None); + } check_accounts_local(&accounts, &pubkeys, 100); accounts.accounts_db.add_root_and_flush_write_cache(slot); let accounts_hash = accounts @@ -240,6 +258,7 @@ mod serde_snapshot_tests { &mut reader, &daccounts_paths, storage_and_next_append_vec_id, + ACCOUNTS_DB_CONFIG_FOR_TESTING, ) .unwrap(), )); @@ -259,7 +278,7 @@ mod serde_snapshot_tests { let db = AccountsDb::new_single_for_tests(); let key = solana_pubkey::new_rand(); let account0 = AccountSharedData::new(1, 0, &key); - db.store_for_tests(unrooted_slot, &[(&key, &account0)]); + db.store_for_tests((unrooted_slot, [(&key, &account0)].as_slice())); // Purge the slot db.remove_unrooted_slots(&[(unrooted_slot, unrooted_bank_id)]); @@ -267,11 +286,16 @@ mod serde_snapshot_tests { // Add a new root let key2 = solana_pubkey::new_rand(); let new_root = unrooted_slot + 1; - 
db.store_for_tests(new_root, &[(&key2, &account0)]); + db.store_for_tests((new_root, [(&key2, &account0)].as_slice())); db.add_root_and_flush_write_cache(new_root); // Simulate reconstruction from snapshot - let db = reconstruct_accounts_db_via_serialization(&db, new_root, storage_access); + let db = reconstruct_accounts_db_via_serialization( + &db, + new_root, + storage_access, + ACCOUNTS_DB_CONFIG_FOR_TESTING, + ); // Check root account exists db.assert_load_account(new_root, key2, 1); @@ -283,12 +307,27 @@ mod serde_snapshot_tests { .is_none()); } - #[test_case(StorageAccess::Mmap)] - #[test_case(StorageAccess::File)] - fn test_accounts_db_serialize1(storage_access: StorageAccess) { + #[test_matrix( + [StorageAccess::File, StorageAccess::Mmap], + [MarkObsoleteAccounts::Enabled, MarkObsoleteAccounts::Disabled], + [MarkObsoleteAccounts::Enabled, MarkObsoleteAccounts::Disabled] + )] + fn test_accounts_db_serialize1( + storage_access: StorageAccess, + mark_obsolete_accounts_initial: MarkObsoleteAccounts, + mark_obsolete_accounts_restore: MarkObsoleteAccounts, + ) { for pass in 0..2 { solana_logger::setup(); - let accounts = AccountsDb::new_single_for_tests(); + let accounts = AccountsDb::new_with_config( + Vec::new(), + AccountsDbConfig { + mark_obsolete_accounts: mark_obsolete_accounts_initial, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + None, + Arc::default(), + ); let mut pubkeys: Vec = vec![]; // Create 100 accounts in slot 0 @@ -318,7 +357,7 @@ mod serde_snapshot_tests { // Overwrite account 30 from slot 0 with lamports=0 into slot 1. 
// Slot 1 should now have 10 + 1 = 11 accounts let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); - accounts.store_for_tests(latest_slot, &[(&pubkeys[30], &account)]); + accounts.store_for_tests((latest_slot, [(&pubkeys[30], &account)].as_slice())); // Create 10 new accounts in slot 1, should now have 11 + 10 = 21 // accounts @@ -337,7 +376,7 @@ mod serde_snapshot_tests { // Overwrite account 31 from slot 0 with lamports=0 into slot 2. // Slot 2 should now have 20 + 1 = 21 accounts let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); - accounts.store_for_tests(latest_slot, &[(&pubkeys[31], &account)]); + accounts.store_for_tests((latest_slot, [(&pubkeys[31], &account)].as_slice())); // Create 10 new accounts in slot 2. Slot 2 should now have // 21 + 10 = 31 accounts @@ -358,8 +397,16 @@ mod serde_snapshot_tests { accounts.check_storage(1, 11, 21); accounts.check_storage(2, 31, 31); - let daccounts = - reconstruct_accounts_db_via_serialization(&accounts, latest_slot, storage_access); + let accounts_db_config = AccountsDbConfig { + mark_obsolete_accounts: mark_obsolete_accounts_restore, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }; + let daccounts = reconstruct_accounts_db_via_serialization( + &accounts, + latest_slot, + storage_access, + accounts_db_config, + ); assert_eq!( daccounts.write_version.load(Ordering::Acquire), @@ -371,8 +418,23 @@ mod serde_snapshot_tests { // Don't check the first 35 accounts which have not been modified on slot 0 daccounts.check_accounts(&pubkeys[35..], 0, 65, 37); daccounts.check_accounts(&pubkeys1, 1, 10, 1); - daccounts.check_storage(0, 100, 100); - daccounts.check_storage(1, 21, 21); + + // If accounts are marked obsolete at initial save time, then the accounts will be + // shrunk during snapshot archive + if mark_obsolete_accounts_initial == MarkObsoleteAccounts::Enabled { + daccounts.check_storage(0, 78, 78); + daccounts.check_storage(1, 11, 11); + // If accounts are marked 
obsolete at restore time, then the accounts will be marked + // obsolete and cleaned during snapshot restore but not removed from the storages until + // the next shrink + } else if mark_obsolete_accounts_restore == MarkObsoleteAccounts::Enabled { + daccounts.check_storage(0, 78, 100); + daccounts.check_storage(1, 11, 21); + } else { + daccounts.check_storage(0, 100, 100); + daccounts.check_storage(1, 21, 21); + } + daccounts.check_storage(2, 31, 31); assert_eq!( @@ -402,12 +464,12 @@ mod serde_snapshot_tests { let accounts = AccountsDb::new_single_for_tests(); let mut current_slot = 1; - accounts.store_for_tests(current_slot, &[(&pubkey, &account)]); + accounts.store_for_tests((current_slot, [(&pubkey, &account)].as_slice())); accounts.add_root(current_slot); current_slot += 1; - accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]); - accounts.store_for_tests(current_slot, &[(&pubkey2, &account2)]); + accounts.store_for_tests((current_slot, [(&pubkey, &zero_lamport_account)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey2, &account2)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); @@ -419,8 +481,12 @@ mod serde_snapshot_tests { accounts.print_accounts_stats("accounts_post_purge"); - let accounts = - reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); + let accounts = reconstruct_accounts_db_via_serialization( + &accounts, + current_slot, + storage_access, + ACCOUNTS_DB_CONFIG_FOR_TESTING, + ); accounts.print_accounts_stats("reconstructed"); @@ -452,21 +518,27 @@ mod serde_snapshot_tests { let accounts = AccountsDb::new_single_for_tests(); let mut current_slot = 1; - accounts.store_for_tests(current_slot, &[(&pubkey, &account)]); - accounts.store_for_tests(current_slot, &[(&purged_pubkey1, &account2)]); + accounts.store_for_tests((current_slot, [(&pubkey, &account)].as_slice())); + accounts.store_for_tests((current_slot, [(&purged_pubkey1, &account2)].as_slice())); 
accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; - accounts.store_for_tests(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]); - accounts.store_for_tests(current_slot, &[(&purged_pubkey2, &account3)]); + accounts.store_for_tests(( + current_slot, + [(&purged_pubkey1, &zero_lamport_account)].as_slice(), + )); + accounts.store_for_tests((current_slot, [(&purged_pubkey2, &account3)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; - accounts.store_for_tests(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]); + accounts.store_for_tests(( + current_slot, + [(&purged_pubkey2, &zero_lamport_account)].as_slice(), + )); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; - accounts.store_for_tests(current_slot, &[(&dummy_pubkey, &dummy_account)]); + accounts.store_for_tests((current_slot, [(&dummy_pubkey, &dummy_account)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); accounts.print_accounts_stats("pre_f"); @@ -491,8 +563,17 @@ mod serde_snapshot_tests { fn test_accounts_purge_chained_purge_before_snapshot_restore(storage_access: StorageAccess) { solana_logger::setup(); with_chained_zero_lamport_accounts(|accounts, current_slot| { + // If there is no latest full snapshot, zero lamport accounts can be cleaned and + // removed immediately. 
Set latest full snapshot slot to zero to avoid cleaning + // zero lamport accounts + accounts.set_latest_full_snapshot_slot(0); accounts.clean_accounts_for_tests(); - reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access) + reconstruct_accounts_db_via_serialization( + &accounts, + current_slot, + storage_access, + ACCOUNTS_DB_CONFIG_FOR_TESTING, + ) }); } @@ -501,11 +582,21 @@ mod serde_snapshot_tests { fn test_accounts_purge_chained_purge_after_snapshot_restore(storage_access: StorageAccess) { solana_logger::setup(); with_chained_zero_lamport_accounts(|accounts, current_slot| { - let accounts = - reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); + let accounts = reconstruct_accounts_db_via_serialization( + &accounts, + current_slot, + storage_access, + ACCOUNTS_DB_CONFIG_FOR_TESTING, + ); accounts.print_accounts_stats("after_reconstruct"); + accounts.set_latest_full_snapshot_slot(0); accounts.clean_accounts_for_tests(); - reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access) + reconstruct_accounts_db_via_serialization( + &accounts, + current_slot, + storage_access, + ACCOUNTS_DB_CONFIG_FOR_TESTING, + ) }); } @@ -535,34 +626,45 @@ mod serde_snapshot_tests { // create intermediate updates to purged_pubkey1 so that // generate_index must add slots as root last at once current_slot += 1; - accounts.store_for_tests(current_slot, &[(&pubkey, &account)]); - accounts.store_for_tests(current_slot, &[(&purged_pubkey1, &account2)]); + accounts.store_for_tests((current_slot, [(&pubkey, &account)].as_slice())); + accounts.store_for_tests((current_slot, [(&purged_pubkey1, &account2)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; - accounts.store_for_tests(current_slot, &[(&purged_pubkey1, &account2)]); + accounts.store_for_tests((current_slot, [(&purged_pubkey1, &account2)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); 
current_slot += 1; - accounts.store_for_tests(current_slot, &[(&purged_pubkey1, &account2)]); + accounts.store_for_tests((current_slot, [(&purged_pubkey1, &account2)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; - accounts.store_for_tests(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]); - accounts.store_for_tests(current_slot, &[(&purged_pubkey2, &account3)]); + accounts.store_for_tests(( + current_slot, + [(&purged_pubkey1, &zero_lamport_account)].as_slice(), + )); + accounts.store_for_tests((current_slot, [(&purged_pubkey2, &account3)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; - accounts.store_for_tests(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]); + accounts.store_for_tests(( + current_slot, + [(&purged_pubkey2, &zero_lamport_account)].as_slice(), + )); accounts.add_root_and_flush_write_cache(current_slot); current_slot += 1; - accounts.store_for_tests(current_slot, &[(&dummy_pubkey, &dummy_account)]); + accounts.store_for_tests((current_slot, [(&dummy_pubkey, &dummy_account)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); accounts.print_count_and_status("before reconstruct"); - let accounts = - reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); + let accounts = reconstruct_accounts_db_via_serialization( + &accounts, + current_slot, + storage_access, + ACCOUNTS_DB_CONFIG_FOR_TESTING, + ); + accounts.set_latest_full_snapshot_slot(0); accounts.print_count_and_status("before purge zero"); accounts.clean_accounts_for_tests(); accounts.print_count_and_status("after purge zero"); @@ -595,37 +697,38 @@ mod serde_snapshot_tests { let mut current_slot = 0; let accounts = AccountsDb::new_single_for_tests(); + accounts.set_latest_full_snapshot_slot(0); + // A: Initialize AccountsDb with pubkey1 and pubkey2 current_slot += 1; - accounts.store_for_tests(current_slot, &[(&pubkey1, &account)]); - 
accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]); + accounts.store_for_tests((current_slot, [(&pubkey1, &account)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey2, &account)].as_slice())); accounts.add_root(current_slot); // B: Test multiple updates to pubkey1 in a single slot/storage current_slot += 1; assert_eq!(0, accounts.alive_account_count_in_slot(current_slot)); accounts.add_root_and_flush_write_cache(current_slot - 1); - assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.store_for_tests(current_slot, &[(&pubkey1, &account2)]); - accounts.store_for_tests(current_slot, &[(&pubkey1, &account2)]); + accounts.assert_ref_count(&pubkey1, 1); + accounts.store_for_tests((current_slot, [(&pubkey1, &account2)].as_slice())); + accounts.store_for_tests((current_slot, [(&pubkey1, &account2)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); assert_eq!(1, accounts.alive_account_count_in_slot(current_slot)); // Stores to same pubkey, same slot only count once towards the - // ref count - assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1)); + accounts.assert_ref_count(&pubkey1, 2); // C: Yet more update to trigger lazy clean of step A current_slot += 1; - assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.store_for_tests(current_slot, &[(&pubkey1, &account3)]); + accounts.assert_ref_count(&pubkey1, 2); + accounts.store_for_tests((current_slot, [(&pubkey1, &account3)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); - assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); + accounts.assert_ref_count(&pubkey1, 3); accounts.add_root_and_flush_write_cache(current_slot); // D: Make pubkey1 0-lamport; also triggers clean of step B current_slot += 1; - assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); - accounts.store_for_tests(current_slot, &[(&pubkey1, &zero_lamport_account)]); + accounts.assert_ref_count(&pubkey1, 3); + accounts.store_for_tests((current_slot, 
[(&pubkey1, &zero_lamport_account)].as_slice())); accounts.add_root_and_flush_write_cache(current_slot); // had to be a root to flush, but clean won't work as this test expects if it is a root // so, remove the root from alive_roots, then restore it after clean @@ -645,17 +748,14 @@ mod serde_snapshot_tests { .alive_roots .insert(current_slot); - assert_eq!( - // Removed one reference from the dead slot (reference only counted once - // even though there were two stores to the pubkey in that slot) - 3, /* == 3 - 1 + 1 */ - accounts.ref_count_for_pubkey(&pubkey1) - ); + // Removed one reference from the dead slot (reference only counted once + // even though there were two stores to the pubkey in that slot) + accounts.assert_ref_count(&pubkey1, 3); accounts.add_root(current_slot); // E: Avoid missing bank hash error current_slot += 1; - accounts.store_for_tests(current_slot, &[(&dummy_pubkey, &dummy_account)]); + accounts.store_for_tests((current_slot, [(&dummy_pubkey, &dummy_account)].as_slice())); accounts.add_root(current_slot); accounts.assert_load_account(current_slot, pubkey1, zero_lamport); @@ -668,11 +768,18 @@ mod serde_snapshot_tests { // So, prevent that from happening by introducing refcount ((current_slot - 1)..=current_slot).for_each(|slot| accounts.flush_root_write_cache(slot)); accounts.clean_accounts_for_tests(); - let accounts = - reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); + let accounts = reconstruct_accounts_db_via_serialization( + &accounts, + current_slot, + storage_access, + ACCOUNTS_DB_CONFIG_FOR_TESTING, + ); + + // Set snapshot to zero to avoid cleaning zero-lamport pubkey1 + accounts.set_latest_full_snapshot_slot(0); accounts.clean_accounts_for_tests(); - info!("pubkey: {}", pubkey1); + info!("pubkey: {pubkey1}"); accounts.print_accounts_stats("pre_clean"); accounts.assert_load_account(current_slot, pubkey1, zero_lamport); accounts.assert_load_account(current_slot, pubkey2, old_lamport); @@ -680,11 
+787,14 @@ mod serde_snapshot_tests { // F: Finally, make Step A cleanable current_slot += 1; - accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]); + accounts.store_for_tests((current_slot, [(&pubkey2, &account)].as_slice())); accounts.add_root(current_slot); // Do clean accounts.flush_root_write_cache(current_slot); + + // Make zero-lamport pubkey1 cleanable by setting the latest snapshot slot + accounts.set_latest_full_snapshot_slot(current_slot); accounts.clean_accounts_for_tests(); // 2nd clean needed to clean-up pubkey1 @@ -719,7 +829,7 @@ mod serde_snapshot_tests { current_slot += 1; for pubkey in &pubkeys { - accounts.store_for_tests(current_slot, &[(pubkey, &account)]); + accounts.store_for_tests((current_slot, [(pubkey, &account)].as_slice())); } let shrink_slot = current_slot; accounts.add_root_and_flush_write_cache(current_slot); @@ -729,7 +839,7 @@ mod serde_snapshot_tests { let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { - accounts.store_for_tests(current_slot, &[(pubkey, &account)]); + accounts.store_for_tests((current_slot, [(pubkey, &account)].as_slice())); } accounts.add_root_and_flush_write_cache(current_slot); @@ -755,8 +865,12 @@ mod serde_snapshot_tests { let accounts_lt_hash_pre = accounts .calculate_accounts_lt_hash_at_startup_from_index(&no_ancestors, current_slot); - let accounts = - reconstruct_accounts_db_via_serialization(&accounts, current_slot, storage_access); + let accounts = reconstruct_accounts_db_via_serialization( + &accounts, + current_slot, + storage_access, + ACCOUNTS_DB_CONFIG_FOR_TESTING, + ); let accounts_lt_hash_post = accounts .calculate_accounts_lt_hash_at_startup_from_index(&no_ancestors, current_slot); assert_eq!(accounts_lt_hash_pre, accounts_lt_hash_post); diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index d825c4746120d2..9dcbb395451adc 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ 
b/runtime/src/snapshot_bank_utils.rs @@ -8,7 +8,6 @@ use { SnapshotRootPaths, UnpackedSnapshotsDirAndVersion, }, }, - solana_accounts_db::accounts_file::StorageAccess, tempfile::TempDir, }; use { @@ -17,7 +16,7 @@ use { epoch_stakes::VersionedEpochStakes, runtime_config::RuntimeConfig, serde_snapshot::{ - reconstruct_bank_from_fields, SnapshotAccountsDbFields, SnapshotBankFields, + self, reconstruct_bank_from_fields, SnapshotAccountsDbFields, SnapshotBankFields, }, snapshot_archive_info::{ FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfoGetter, @@ -26,23 +25,20 @@ use { snapshot_hash::SnapshotHash, snapshot_package::{SnapshotKind, SnapshotPackage}, snapshot_utils::{ - self, deserialize_snapshot_data_file, get_highest_bank_snapshot_post, - get_highest_full_snapshot_archive_info, get_highest_incremental_snapshot_archive_info, - rebuild_storages_from_snapshot_dir, serialize_snapshot_data_file, + self, get_highest_bank_snapshot, get_highest_full_snapshot_archive_info, + get_highest_incremental_snapshot_archive_info, rebuild_storages_from_snapshot_dir, verify_and_unarchive_snapshots, ArchiveFormat, BankSnapshotInfo, SnapshotError, SnapshotVersion, StorageAndNextAccountsFileId, UnarchivedSnapshots, - VerifyEpochStakesError, VerifySlotDeltasError, + VerifyEpochStakesError, VerifySlotDeltasError, VerifySlotHistoryError, }, status_cache, }, - bincode::{config::Options, serialize_into}, log::*, solana_accounts_db::{ accounts_db::{AccountsDbConfig, AtomicAccountsFileId}, accounts_update_notifier_interface::AccountsUpdateNotifier, utils::remove_dir_contents, }, - solana_builtins::prototype::BuiltinPrototype, solana_clock::{Epoch, Slot}, solana_genesis_config::GenesisConfig, solana_measure::{measure::Measure, measure_time}, @@ -56,37 +52,13 @@ use { }, }; -pub fn serialize_status_cache( - slot_deltas: &[BankSlotDelta], - status_cache_path: &Path, -) -> snapshot_utils::Result { - serialize_snapshot_data_file(status_cache_path, |stream| { - 
serialize_into(stream, slot_deltas)?; - Ok(()) - }) -} - -#[derive(Debug)] -pub struct BankFromArchivesTimings { - pub untar_full_snapshot_archive_us: u64, - pub untar_incremental_snapshot_archive_us: u64, - pub rebuild_bank_us: u64, - pub verify_bank_us: u64, -} - -#[derive(Debug)] -pub struct BankFromDirTimings { - pub rebuild_storages_us: u64, - pub rebuild_bank_us: u64, -} - /// Parses out bank specific information from a snapshot archive including the leader schedule. /// epoch schedule, etc. #[cfg(feature = "dev-context-only-utils")] pub fn bank_fields_from_snapshot_archives( full_snapshot_archives_dir: impl AsRef, incremental_snapshot_archives_dir: impl AsRef, - storage_access: StorageAccess, + accounts_db_config: &AccountsDbConfig, ) -> snapshot_utils::Result { let full_snapshot_archive_info = get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir).ok_or_else(|| { @@ -115,7 +87,7 @@ pub fn bank_fields_from_snapshot_archives( &full_snapshot_archive_info, incremental_snapshot_archive_info.as_ref(), &account_paths, - storage_access, + accounts_db_config, )?; bank_fields_from_snapshots( @@ -161,15 +133,14 @@ pub fn bank_from_snapshot_archives( genesis_config: &GenesisConfig, runtime_config: &RuntimeConfig, debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, limit_load_slot_count_from_snapshot: Option, accounts_db_skip_shrink: bool, accounts_db_force_initial_clean: bool, verify_index: bool, - accounts_db_config: Option, + accounts_db_config: AccountsDbConfig, accounts_update_notifier: Option, exit: Arc, -) -> snapshot_utils::Result<(Bank, BankFromArchivesTimings)> { +) -> snapshot_utils::Result { info!( "Loading bank from full snapshot archive: {}, and incremental snapshot archive: {:?}", full_snapshot_archive_info.path().display(), @@ -201,10 +172,7 @@ pub fn bank_from_snapshot_archives( full_snapshot_archive_info, incremental_snapshot_archive_info, account_paths, - accounts_db_config - .as_ref() - .map(|config| 
config.storage_access) - .unwrap_or_default(), + &accounts_db_config, )?; if let Some(incremental_storage) = incremental_storage { @@ -225,7 +193,6 @@ pub fn bank_from_snapshot_archives( account_paths, storage_and_next_append_vec_id, debug_keys, - additional_builtins, limit_load_slot_count_from_snapshot, verify_index, accounts_db_config, @@ -233,7 +200,7 @@ pub fn bank_from_snapshot_archives( exit, )?; measure_rebuild.stop(); - info!("{}", measure_rebuild); + info!("{measure_rebuild}"); verify_epoch_stakes(&bank)?; @@ -241,20 +208,14 @@ pub fn bank_from_snapshot_archives( // snapshot, use that. Otherwise use the full snapshot. let status_cache_path = incremental_unpacked_snapshots_dir_and_version .as_ref() - .map_or_else( - || { - full_unpacked_snapshots_dir_and_version - .unpacked_snapshots_dir - .as_path() - }, - |unarchived_incremental_snapshot| { - unarchived_incremental_snapshot - .unpacked_snapshots_dir - .as_path() - }, - ) + .unwrap_or(&full_unpacked_snapshots_dir_and_version) + .unpacked_snapshots_dir .join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILENAME); - let slot_deltas = deserialize_status_cache(&status_cache_path)?; + info!( + "Rebuilding status cache from {}", + status_cache_path.display() + ); + let slot_deltas = serde_snapshot::deserialize_status_cache(&status_cache_path)?; verify_slot_deltas(slot_deltas.as_slice(), &bank)?; @@ -277,38 +238,29 @@ pub fn bank_from_snapshot_archives( accounts_db_skip_shrink || !full_snapshot_archive_info.is_remote(), accounts_db_force_initial_clean, full_snapshot_archive_info.slot(), - info.duplicates_lt_hash, + Some(&info.calculated_accounts_lt_hash), ) && limit_load_slot_count_from_snapshot.is_none() { panic!("Snapshot bank for slot {} failed to verify", bank.slot()); } measure_verify.stop(); - let timings = BankFromArchivesTimings { - untar_full_snapshot_archive_us: full_measure_untar.as_us(), - untar_incremental_snapshot_archive_us: incremental_measure_untar - .map_or(0, |incremental_measure_untar| { - 
incremental_measure_untar.as_us() - }), - rebuild_bank_us: measure_rebuild.as_us(), - verify_bank_us: measure_verify.as_us(), - }; datapoint_info!( "bank_from_snapshot_archives", ( "untar_full_snapshot_archive_us", - timings.untar_full_snapshot_archive_us, + full_measure_untar.as_us(), i64 ), ( "untar_incremental_snapshot_archive_us", - timings.untar_incremental_snapshot_archive_us, - i64 + incremental_measure_untar.as_ref().map(Measure::as_us), + Option ), - ("rebuild_bank_us", timings.rebuild_bank_us, i64), - ("verify_bank_us", timings.verify_bank_us, i64), + ("rebuild_bank_us", measure_rebuild.as_us(), i64), + ("verify_bank_us", measure_verify.as_us(), i64), ); - Ok((bank, timings)) + Ok(bank) } /// Rebuild bank from snapshot archives @@ -324,12 +276,11 @@ pub fn bank_from_latest_snapshot_archives( genesis_config: &GenesisConfig, runtime_config: &RuntimeConfig, debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, limit_load_slot_count_from_snapshot: Option, accounts_db_skip_shrink: bool, accounts_db_force_initial_clean: bool, verify_index: bool, - accounts_db_config: Option, + accounts_db_config: AccountsDbConfig, accounts_update_notifier: Option, exit: Arc, ) -> snapshot_utils::Result<( @@ -347,7 +298,7 @@ pub fn bank_from_latest_snapshot_archives( full_snapshot_archive_info.slot(), ); - let (bank, _) = bank_from_snapshot_archives( + let bank = bank_from_snapshot_archives( account_paths, bank_snapshots_dir.as_ref(), &full_snapshot_archive_info, @@ -355,7 +306,6 @@ pub fn bank_from_latest_snapshot_archives( genesis_config, runtime_config, debug_keys, - additional_builtins, limit_load_slot_count_from_snapshot, accounts_db_skip_shrink, accounts_db_force_initial_clean, @@ -380,13 +330,12 @@ pub fn bank_from_snapshot_dir( genesis_config: &GenesisConfig, runtime_config: &RuntimeConfig, debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, limit_load_slot_count_from_snapshot: Option, verify_index: bool, - accounts_db_config: 
Option, + accounts_db_config: AccountsDbConfig, accounts_update_notifier: Option, exit: Arc, -) -> snapshot_utils::Result<(Bank, BankFromDirTimings)> { +) -> snapshot_utils::Result { info!( "Loading bank from snapshot dir: {}", bank_snapshot.snapshot_dir.display() @@ -399,21 +348,17 @@ pub fn bank_from_snapshot_dir( } let next_append_vec_id = Arc::new(AtomicAccountsFileId::new(0)); - let storage_access = accounts_db_config - .as_ref() - .map(|config| config.storage_access) - .unwrap_or_default(); let ((storage, bank_fields, accounts_db_fields), measure_rebuild_storages) = measure_time!( rebuild_storages_from_snapshot_dir( bank_snapshot, account_paths, next_append_vec_id.clone(), - storage_access, + accounts_db_config.storage_access, )?, "rebuild storages from snapshot dir" ); - info!("{}", measure_rebuild_storages); + info!("{measure_rebuild_storages}"); let next_append_vec_id = Arc::try_unwrap(next_append_vec_id).expect("this is the only strong reference"); @@ -423,7 +368,7 @@ pub fn bank_from_snapshot_dir( }; let snapshot_bank_fields = SnapshotBankFields::new(bank_fields, None); let snapshot_accounts_db_fields = SnapshotAccountsDbFields::new(accounts_db_fields, None); - let ((bank, _info), measure_rebuild_bank) = measure_time!( + let ((bank, info), measure_rebuild_bank) = measure_time!( reconstruct_bank_from_fields( snapshot_bank_fields, snapshot_accounts_db_fields, @@ -432,7 +377,6 @@ pub fn bank_from_snapshot_dir( account_paths, storage_and_next_append_vec_id, debug_keys, - additional_builtins, limit_load_slot_count_from_snapshot, verify_index, accounts_db_config, @@ -441,32 +385,39 @@ pub fn bank_from_snapshot_dir( )?, "rebuild bank from snapshot" ); - info!("{}", measure_rebuild_bank); + info!("{measure_rebuild_bank}"); verify_epoch_stakes(&bank)?; let status_cache_path = bank_snapshot .snapshot_dir .join(snapshot_utils::SNAPSHOT_STATUS_CACHE_FILENAME); - let slot_deltas = deserialize_status_cache(&status_cache_path)?; + info!( + "Rebuilding status cache from 
{}", + status_cache_path.display() + ); + let slot_deltas = serde_snapshot::deserialize_status_cache(&status_cache_path)?; verify_slot_deltas(slot_deltas.as_slice(), &bank)?; bank.status_cache.write().unwrap().append(&slot_deltas); - // We trust our local state, so skip the startup accounts verification. - bank.set_initial_accounts_hash_verification_completed(); + if !bank.verify_snapshot_bank( + true, + false, + 0, // since force_clean is false, this value is unused + Some(&info.calculated_accounts_lt_hash), + ) && limit_load_slot_count_from_snapshot.is_none() + { + panic!("Snapshot bank for slot {} failed to verify", bank.slot()); + } - let timings = BankFromDirTimings { - rebuild_storages_us: measure_rebuild_storages.as_us(), - rebuild_bank_us: measure_rebuild_bank.as_us(), - }; datapoint_info!( "bank_from_snapshot_dir", - ("rebuild_storages_us", timings.rebuild_storages_us, i64), - ("rebuild_bank_us", timings.rebuild_bank_us, i64), + ("rebuild_storages_us", measure_rebuild_storages.as_us(), i64), + ("rebuild_bank_us", measure_rebuild_bank.as_us(), i64), ); - Ok((bank, timings)) + Ok(bank) } /// follow the prototype of fn bank_from_latest_snapshot_archives, implement the from_dir case @@ -477,23 +428,21 @@ pub fn bank_from_latest_snapshot_dir( runtime_config: &RuntimeConfig, account_paths: &[PathBuf], debug_keys: Option>>, - additional_builtins: Option<&[BuiltinPrototype]>, limit_load_slot_count_from_snapshot: Option, verify_index: bool, - accounts_db_config: Option, + accounts_db_config: AccountsDbConfig, accounts_update_notifier: Option, exit: Arc, ) -> snapshot_utils::Result { - let bank_snapshot = get_highest_bank_snapshot_post(&bank_snapshots_dir).ok_or_else(|| { + let bank_snapshot = get_highest_bank_snapshot(&bank_snapshots_dir).ok_or_else(|| { SnapshotError::NoSnapshotSlotDir(bank_snapshots_dir.as_ref().to_path_buf()) })?; - let (bank, _) = bank_from_snapshot_dir( + let bank = bank_from_snapshot_dir( account_paths, &bank_snapshot, genesis_config, 
runtime_config, debug_keys, - additional_builtins, limit_load_slot_count_from_snapshot, verify_index, accounts_db_config, @@ -557,23 +506,6 @@ fn snapshot_version_and_root_paths( Ok((snapshot_version, snapshot_root_paths)) } -fn deserialize_status_cache( - status_cache_path: &Path, -) -> snapshot_utils::Result> { - deserialize_snapshot_data_file(status_cache_path, |stream| { - info!( - "Rebuilding status cache from {}", - status_cache_path.display() - ); - let slot_delta: Vec = bincode::options() - .with_limit(snapshot_utils::MAX_SNAPSHOT_DATA_FILE_SIZE) - .with_fixint_encoding() - .allow_trailing_bytes() - .deserialize_from(stream)?; - Ok(slot_delta) - }) -} - /// Verify that the snapshot's slot deltas are not corrupt/invalid fn verify_slot_deltas( slot_deltas: &[BankSlotDelta], @@ -643,9 +575,7 @@ fn verify_slot_deltas_with_history( ) -> std::result::Result<(), VerifySlotDeltasError> { // ensure the slot history is valid (as much as possible), since we're using it to verify the // slot deltas - if slot_history.newest() != bank_slot { - return Err(VerifySlotDeltasError::BadSlotHistory); - } + verify_slot_history(slot_history, bank_slot)?; // all slots in the slot deltas should be in the bank's slot history let slot_missing_from_history = slots_from_slot_deltas @@ -674,11 +604,27 @@ fn verify_slot_deltas_with_history( Ok(()) } +/// Verify that the snapshot's SlotHistory is not corrupt/invalid +fn verify_slot_history( + slot_history: &SlotHistory, + bank_slot: Slot, +) -> Result<(), VerifySlotHistoryError> { + if slot_history.newest() != bank_slot { + return Err(VerifySlotHistoryError::InvalidNewestSlot); + } + + if slot_history.bits.len() != solana_slot_history::MAX_ENTRIES { + return Err(VerifySlotHistoryError::InvalidNumEntries); + } + + Ok(()) +} + /// Verifies the bank's epoch stakes are valid after rebuilding from a snapshot fn verify_epoch_stakes(bank: &Bank) -> std::result::Result<(), VerifyEpochStakesError> { // Stakes are required for epochs from the 
current epoch up-to-and-including the // leader schedule epoch. In practice this will only be two epochs: the current and the next. - // Using a range mirrors how Bank::new_with_paths() seeds the initial epoch stakes. + // Using a range mirrors how Bank::new_from_genesis() seeds the initial epoch stakes. let current_epoch = bank.epoch(); let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot()); let required_epochs = current_epoch..=leader_schedule_epoch; @@ -867,21 +813,23 @@ mod tests { snapshot_config::SnapshotConfig, snapshot_utils::{ clean_orphaned_account_snapshot_dirs, create_tmp_accounts_dir_for_tests, - get_bank_snapshot_dir, get_bank_snapshots, get_bank_snapshots_post, - get_bank_snapshots_pre, get_highest_bank_snapshot, get_highest_bank_snapshot_pre, - get_highest_loadable_bank_snapshot, get_snapshot_file_name, - purge_all_bank_snapshots, purge_bank_snapshot, + get_bank_snapshot_dir, get_bank_snapshots, get_highest_bank_snapshot, + get_highest_loadable_bank_snapshot, purge_all_bank_snapshots, purge_bank_snapshot, purge_bank_snapshots_older_than_slot, purge_incomplete_bank_snapshots, purge_old_bank_snapshots, purge_old_bank_snapshots_at_startup, - snapshot_storage_rebuilder::get_slot_and_append_vec_id, BankSnapshotKind, - BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME, + snapshot_storage_rebuilder::get_slot_and_append_vec_id, + SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME, }, status_cache::Status, }, - solana_accounts_db::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, + semver::Version, + solana_accounts_db::{ + accounts_db::{MarkObsoleteAccounts, ACCOUNTS_DB_CONFIG_FOR_TESTING}, + accounts_file::StorageAccess, + }, solana_genesis_config::create_genesis_config, solana_keypair::Keypair, - solana_native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, + solana_native_token::LAMPORTS_PER_SOL, solana_signer::Signer, solana_system_transaction as system_transaction, solana_transaction::sanitized::SanitizedTransaction, @@ -896,17 
+844,14 @@ mod tests { genesis_config: &GenesisConfig, bank_snapshots_dir: impl AsRef, num_total: usize, - num_posts: usize, should_flush_and_hard_link_storages: bool, ) -> Bank { - assert!(num_posts <= num_total); - // We don't need the snapshot archives to live after this function returns, // so let TempDir::drop() handle cleanup. let snapshot_archives_dir = TempDir::new().unwrap(); let mut bank = Arc::new(Bank::new_for_tests(genesis_config)); - for i in 0..num_total { + for _i in 0..num_total { let slot = bank.slot() + 1; bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::new_unique(), slot)); bank.fill_bank_with_ticks_for_tests(); @@ -921,14 +866,6 @@ mod tests { should_flush_and_hard_link_storages, ) .unwrap(); - - // As a hack, to make a PRE bank snapshot, just rename the POST one. - if i >= num_posts { - let bank_snapshot_dir = get_bank_snapshot_dir(&bank_snapshots_dir, slot); - let post = bank_snapshot_dir.join(get_snapshot_file_name(slot)); - let pre = post.with_extension(BANK_SNAPSHOT_PRE_FILENAME_EXTENSION); - fs::rename(post, pre).unwrap(); - } } Arc::into_inner(bank).unwrap() @@ -973,7 +910,7 @@ mod tests { ) .unwrap(); - let (roundtrip_bank, _) = bank_from_snapshot_archives( + let roundtrip_bank = bank_from_snapshot_archives( &[accounts_dir], bank_snapshots_dir.path(), &snapshot_archive_info, @@ -982,16 +919,14 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - roundtrip_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(original_bank, roundtrip_bank); } @@ -1005,16 +940,26 @@ mod tests { let key3 = Keypair::new(); // Create a few accounts - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); - let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let (genesis_config, mint_keypair) = 
create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); + + let bank_test_config = BankTestConfig { + accounts_db_config: AccountsDbConfig { + mark_obsolete_accounts: MarkObsoleteAccounts::Enabled, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, + }; + + let bank = Bank::new_with_config_for_tests(&genesis_config, bank_test_config); + + let (bank0, bank_forks) = Bank::wrap_with_bank_forks_for_tests(bank); bank0 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank0 - .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey()) + .transfer(2 * LAMPORTS_PER_SOL, &mint_keypair, &key2.pubkey()) .unwrap(); bank0 - .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) + .transfer(3 * LAMPORTS_PER_SOL, &mint_keypair, &key3.pubkey()) .unwrap(); bank0.fill_bank_with_ticks_for_tests(); @@ -1022,42 +967,16 @@ mod tests { bank0.squash(); bank0.force_flush_accounts_cache(); - // Find the account storage entry for slot 0 - let target_slot = 0; - let account_storage_entry = bank0 - .accounts() - .accounts_db - .storage - .get_slot_storage_entry(target_slot) - .unwrap(); - - // Find all the accounts in slot 0 - let accounts = bank0 - .accounts() - .accounts_db - .get_unique_accounts_from_storage(&account_storage_entry); - - // Find the offset of pubkey `key1` in the accounts db slot0 and save the offset. 
- let offset = accounts - .stored_accounts - .iter() - .find(|account| key1.pubkey() == *account.pubkey()) - .map(|account| account.index_info.offset()) - .expect("Pubkey1 is present in Slot0"); - // Create a new slot, and invalidate the account for key1 in slot0 let slot = 1; let bank1 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 - .transfer(sol_to_lamports(1.), &key3, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &key3, &key1.pubkey()) .unwrap(); bank1.fill_bank_with_ticks_for_tests(); - // Mark the entry for pubkey1 as obsolete in slot0 - account_storage_entry.mark_accounts_obsolete(vec![(offset, 0)].into_iter(), slot); - let (_tmp_dir, accounts_dir) = create_tmp_accounts_dir_for_tests(); let bank_snapshots_dir = tempfile::TempDir::new().unwrap(); let snapshot_archives_dir = tempfile::TempDir::new().unwrap(); @@ -1073,7 +992,7 @@ mod tests { ) .unwrap(); - let (roundtrip_bank, _) = bank_from_snapshot_archives( + let roundtrip_bank = bank_from_snapshot_archives( &[accounts_dir], bank_snapshots_dir.path(), &full_snapshot_archive_info, @@ -1082,16 +1001,14 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - roundtrip_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(*bank1, roundtrip_bank); } @@ -1107,16 +1024,16 @@ mod tests { let key4 = Keypair::new(); let key5 = Keypair::new(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); + let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank0 - .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey()) 
+ .transfer(2 * LAMPORTS_PER_SOL, &mint_keypair, &key2.pubkey()) .unwrap(); bank0 - .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) + .transfer(3 * LAMPORTS_PER_SOL, &mint_keypair, &key3.pubkey()) .unwrap(); bank0.fill_bank_with_ticks_for_tests(); @@ -1124,13 +1041,13 @@ mod tests { let bank1 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 - .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) + .transfer(3 * LAMPORTS_PER_SOL, &mint_keypair, &key3.pubkey()) .unwrap(); bank1 - .transfer(sol_to_lamports(4.), &mint_keypair, &key4.pubkey()) + .transfer(4 * LAMPORTS_PER_SOL, &mint_keypair, &key4.pubkey()) .unwrap(); bank1 - .transfer(sol_to_lamports(5.), &mint_keypair, &key5.pubkey()) + .transfer(5 * LAMPORTS_PER_SOL, &mint_keypair, &key5.pubkey()) .unwrap(); bank1.fill_bank_with_ticks_for_tests(); @@ -1138,7 +1055,7 @@ mod tests { let bank2 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank2.fill_bank_with_ticks_for_tests(); @@ -1146,7 +1063,7 @@ mod tests { let bank3 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); bank3 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank3.fill_bank_with_ticks_for_tests(); @@ -1154,7 +1071,7 @@ mod tests { let bank4 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); bank4 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank4.fill_bank_with_ticks_for_tests(); @@ -1174,7 +1091,7 @@ mod tests { ) .unwrap(); - let (roundtrip_bank, _) = bank_from_snapshot_archives( + let roundtrip_bank = bank_from_snapshot_archives( &[accounts_dir], 
bank_snapshots_dir.path(), &full_snapshot_archive_info, @@ -1183,16 +1100,14 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - roundtrip_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(*bank4, roundtrip_bank); } @@ -1214,16 +1129,16 @@ mod tests { let key4 = Keypair::new(); let key5 = Keypair::new(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); + let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank0 - .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey()) + .transfer(2 * LAMPORTS_PER_SOL, &mint_keypair, &key2.pubkey()) .unwrap(); bank0 - .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) + .transfer(3 * LAMPORTS_PER_SOL, &mint_keypair, &key3.pubkey()) .unwrap(); bank0.fill_bank_with_ticks_for_tests(); @@ -1231,13 +1146,13 @@ mod tests { let bank1 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 - .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) + .transfer(3 * LAMPORTS_PER_SOL, &mint_keypair, &key3.pubkey()) .unwrap(); bank1 - .transfer(sol_to_lamports(4.), &mint_keypair, &key4.pubkey()) + .transfer(4 * LAMPORTS_PER_SOL, &mint_keypair, &key4.pubkey()) .unwrap(); bank1 - .transfer(sol_to_lamports(5.), &mint_keypair, &key5.pubkey()) + .transfer(5 * LAMPORTS_PER_SOL, &mint_keypair, &key5.pubkey()) .unwrap(); bank1.fill_bank_with_ticks_for_tests(); @@ -1262,7 +1177,7 @@ mod tests { let bank2 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 - .transfer(sol_to_lamports(1.), 
&mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank2.fill_bank_with_ticks_for_tests(); @@ -1270,7 +1185,7 @@ mod tests { let bank3 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); bank3 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank3.fill_bank_with_ticks_for_tests(); @@ -1278,7 +1193,7 @@ mod tests { let bank4 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); bank4 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank4.fill_bank_with_ticks_for_tests(); @@ -1293,7 +1208,7 @@ mod tests { ) .unwrap(); - let (roundtrip_bank, _) = bank_from_snapshot_archives( + let roundtrip_bank = bank_from_snapshot_archives( &[accounts_dir], bank_snapshots_dir.path(), &full_snapshot_archive_info, @@ -1302,16 +1217,14 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - roundtrip_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(*bank4, roundtrip_bank); } @@ -1323,16 +1236,16 @@ mod tests { let key2 = Keypair::new(); let key3 = Keypair::new(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); + let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank0 - .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey()) + .transfer(2 * LAMPORTS_PER_SOL, &mint_keypair, &key2.pubkey()) .unwrap(); bank0 - 
.transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) + .transfer(3 * LAMPORTS_PER_SOL, &mint_keypair, &key3.pubkey()) .unwrap(); bank0.fill_bank_with_ticks_for_tests(); @@ -1340,13 +1253,13 @@ mod tests { let bank1 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank0, &collector, slot); bank1 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank1 - .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey()) + .transfer(2 * LAMPORTS_PER_SOL, &mint_keypair, &key2.pubkey()) .unwrap(); bank1 - .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) + .transfer(3 * LAMPORTS_PER_SOL, &mint_keypair, &key3.pubkey()) .unwrap(); bank1.fill_bank_with_ticks_for_tests(); @@ -1371,7 +1284,7 @@ mod tests { let bank2 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank2.fill_bank_with_ticks_for_tests(); @@ -1379,7 +1292,7 @@ mod tests { let bank3 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank2, &collector, slot); bank3 - .transfer(sol_to_lamports(2.), &mint_keypair, &key2.pubkey()) + .transfer(2 * LAMPORTS_PER_SOL, &mint_keypair, &key2.pubkey()) .unwrap(); bank3.fill_bank_with_ticks_for_tests(); @@ -1387,7 +1300,7 @@ mod tests { let bank4 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank3, &collector, slot); bank4 - .transfer(sol_to_lamports(3.), &mint_keypair, &key3.pubkey()) + .transfer(3 * LAMPORTS_PER_SOL, &mint_keypair, &key3.pubkey()) .unwrap(); bank4.fill_bank_with_ticks_for_tests(); @@ -1411,16 +1324,14 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - 
deserialized_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(deserialized_bank, *bank4); } @@ -1458,11 +1369,12 @@ mod tests { let incremental_snapshot_archives_dir = tempfile::TempDir::new().unwrap(); let snapshot_archive_format = SnapshotConfig::default().archive_format; - let (mut genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); + let (mut genesis_config, mint_keypair) = + create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); // test expects 0 transaction fee genesis_config.fee_rate_governor = solana_fee_calculator::FeeRateGovernor::new(0, 0); - let lamports_to_transfer = sol_to_lamports(123_456.); + let lamports_to_transfer = 123_456 * LAMPORTS_PER_SOL; let (bank0, bank_forks) = Bank::new_with_paths_for_tests( &genesis_config, Arc::::default(), @@ -1531,7 +1443,7 @@ mod tests { snapshot_archive_format, ) .unwrap(); - let (deserialized_bank, _) = bank_from_snapshot_archives( + let deserialized_bank = bank_from_snapshot_archives( &[accounts_dir.clone()], bank_snapshots_dir.path(), &full_snapshot_archive_info, @@ -1540,16 +1452,14 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - deserialized_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!( deserialized_bank, *bank2, "Ensure rebuilding from an incremental snapshot works" @@ -1590,7 +1500,7 @@ mod tests { ) .unwrap(); - let (deserialized_bank, _) = bank_from_snapshot_archives( + let deserialized_bank = bank_from_snapshot_archives( &[accounts_dir], bank_snapshots_dir.path(), &full_snapshot_archive_info, @@ -1599,16 +1509,14 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - 
deserialized_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!( deserialized_bank, *bank4, "Ensure rebuilding from an incremental snapshot works", @@ -1627,7 +1535,7 @@ mod tests { let collector = Pubkey::new_unique(); let key1 = Keypair::new(); - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); + let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); let (bank0, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank0.fill_bank_with_ticks_for_tests(); @@ -1654,7 +1562,7 @@ mod tests { let bank2 = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank1, &collector, slot); bank2 - .transfer(sol_to_lamports(1.), &mint_keypair, &key1.pubkey()) + .transfer(LAMPORTS_PER_SOL, &mint_keypair, &key1.pubkey()) .unwrap(); bank2.fill_bank_with_ticks_for_tests(); @@ -1672,7 +1580,10 @@ mod tests { let bank_fields = bank_fields_from_snapshot_archives( &all_snapshots_dir, &all_snapshots_dir, - storage_access, + &AccountsDbConfig { + storage_access, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }, ) .unwrap(); assert_eq!(bank_fields.slot, bank2.slot()); @@ -1718,6 +1629,73 @@ mod tests { assert!(hardlink_dirs.iter().all(|dir| fs::metadata(dir).is_err())); } + /// Test versioning when fastbooting + /// If the storages flushed file is present, fastboot should always pass + /// If only the fastboot version file is present, the version should be checked for compatibility + #[test] + fn test_fastboot_versioning() { + let genesis_config = GenesisConfig::default(); + let bank_snapshots_dir = tempfile::TempDir::new().unwrap(); + let _bank = create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 3, true); + + let snapshot_config = SnapshotConfig { + bank_snapshots_dir: bank_snapshots_dir.as_ref().to_path_buf(), + full_snapshot_archives_dir: bank_snapshots_dir.as_ref().to_path_buf(), + incremental_snapshot_archives_dir: 
bank_snapshots_dir.as_ref().to_path_buf(), + ..Default::default() + }; + + // Verify the snapshot is found with all files present + let snapshot = get_highest_loadable_bank_snapshot(&snapshot_config).unwrap(); + assert_eq!(snapshot.slot, 3); + + // Test 1: Remove the storages flushed file + let storages_flushed_file = snapshot + .snapshot_dir + .join(snapshot_utils::SNAPSHOT_STORAGES_FLUSHED_FILENAME); + fs::remove_file(storages_flushed_file).unwrap(); + + // If the storages flushed file is removed, the version in the version file should be + // checked, and the snapshot should be found + let snapshot = get_highest_loadable_bank_snapshot(&snapshot_config).unwrap(); + assert_eq!(snapshot.slot, 3); + + // Test 2: Modify the version in the fastboot version file to something newer + // than current + let complete_flag_file = snapshot + .snapshot_dir + .join(snapshot_utils::SNAPSHOT_FASTBOOT_VERSION_FILENAME); + let version = fs::read_to_string(&complete_flag_file).unwrap(); + let version = Version::parse(&version).unwrap(); + let new_version = Version::new(version.major + 1, version.minor, version.patch); + + fs::write(&complete_flag_file, new_version.to_string()).unwrap(); + + // With an invalid version and no flush file, the snapshot will be considered invalid + let new_snapshot = get_highest_loadable_bank_snapshot(&snapshot_config); + assert!(new_snapshot.is_none()); + + // Test 3: Remove the bank snapshot version file + let complete_flag_file = snapshot + .snapshot_dir + .join(snapshot_utils::SNAPSHOT_VERSION_FILENAME); + fs::remove_file(complete_flag_file).unwrap(); + + // This will now find the previous entry in the directory, which is slot 2 + let snapshot = get_highest_loadable_bank_snapshot(&snapshot_config).unwrap(); + assert_eq!(snapshot.slot, 2); + + // Test 4: Remove the fastboot version file + let fastboot_version_file = snapshot + .snapshot_dir + .join(snapshot_utils::SNAPSHOT_FASTBOOT_VERSION_FILENAME); + fs::remove_file(fastboot_version_file).unwrap(); 
+ + // The flush file will still be found, making this a valid snapshot + let snapshot = get_highest_loadable_bank_snapshot(&snapshot_config).unwrap(); + assert_eq!(snapshot.slot, 2); + } + #[test_case(false)] #[test_case(true)] fn test_get_highest_bank_snapshot(should_flush_and_hard_link_storages: bool) { @@ -1727,17 +1705,16 @@ mod tests { &genesis_config, &bank_snapshots_dir, 4, - 0, should_flush_and_hard_link_storages, ); let snapshot = get_highest_bank_snapshot(&bank_snapshots_dir).unwrap(); assert_eq!(snapshot.slot, 4); - let complete_flag_file = snapshot + let version_file = snapshot .snapshot_dir - .join(snapshot_utils::SNAPSHOT_STATE_COMPLETE_FILENAME); - fs::remove_file(complete_flag_file).unwrap(); + .join(snapshot_utils::SNAPSHOT_VERSION_FILENAME); + fs::remove_file(version_file).unwrap(); // The incomplete snapshot dir should still exist let snapshot_dir_4 = snapshot.snapshot_dir; assert!(snapshot_dir_4.exists()); @@ -1763,8 +1740,7 @@ mod tests { fn test_clean_orphaned_account_snapshot_dirs() { let genesis_config = GenesisConfig::default(); let bank_snapshots_dir = tempfile::TempDir::new().unwrap(); - let _bank = - create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 2, 0, true); + let _bank = create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 2, true); let snapshot_dir_slot_2 = bank_snapshots_dir.path().join("2"); let accounts_link_dir_slot_2 = @@ -1807,8 +1783,7 @@ mod tests { fn test_clean_orphaned_account_snapshot_dirs_no_hard_link() { let genesis_config = GenesisConfig::default(); let bank_snapshots_dir = tempfile::TempDir::new().unwrap(); - let _bank = - create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 2, 0, false); + let _bank = create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 2, false); // Ensure the bank snapshot dir does exist. 
let bank_snapshot_dir = snapshot_utils::get_bank_snapshot_dir(&bank_snapshots_dir, 2); @@ -1833,16 +1808,14 @@ mod tests { &genesis_config, &bank_snapshots_dir, 2, - 0, should_flush_and_hard_link_storages, ); - // remove the "state complete" files so the snapshots will be purged + // remove the "version" files so the snapshots will be purged for slot in [1, 2] { let bank_snapshot_dir = get_bank_snapshot_dir(&bank_snapshots_dir, slot); - let state_complete_file = - bank_snapshot_dir.join(snapshot_utils::SNAPSHOT_STATE_COMPLETE_FILENAME); - fs::remove_file(state_complete_file).unwrap(); + let version_file = bank_snapshot_dir.join(snapshot_utils::SNAPSHOT_VERSION_FILENAME); + fs::remove_file(version_file).unwrap(); } purge_incomplete_bank_snapshots(&bank_snapshots_dir); @@ -1878,9 +1851,9 @@ mod tests { let full_snapshot_archives_dir = tempfile::TempDir::new().unwrap(); let snapshot_archive_format = SnapshotConfig::default().archive_format; - let (genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1_000_000.)); + let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let lamports_to_transfer = sol_to_lamports(123_456.); + let lamports_to_transfer = 123_456 * LAMPORTS_PER_SOL; let bank_test_config = BankTestConfig { accounts_db_config: AccountsDbConfig { storage_access, @@ -1965,7 +1938,7 @@ mod tests { let accounts_dir = tempfile::TempDir::new().unwrap(); let other_bank_snapshots_dir = tempfile::TempDir::new().unwrap(); - let (deserialized_bank, _) = bank_from_snapshot_archives( + let deserialized_bank = bank_from_snapshot_archives( &[accounts_dir.path().to_path_buf()], other_bank_snapshots_dir.path(), &full_snapshot_archive_info, @@ -1974,18 +1947,15 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) .unwrap(); - 
deserialized_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); - assert!( deserialized_bank .get_account_modified_slot(&key1.pubkey()) @@ -2001,30 +1971,27 @@ mod tests { fn test_bank_from_snapshot_dir(storage_access: StorageAccess) { let genesis_config = GenesisConfig::default(); let bank_snapshots_dir = tempfile::TempDir::new().unwrap(); - let bank = create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 3, 0, true); + let bank = create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 3, true); let bank_snapshot = get_highest_bank_snapshot(&bank_snapshots_dir).unwrap(); let account_paths = &bank.rc.accounts.accounts_db.paths; - let (bank_constructed, ..) = bank_from_snapshot_dir( + let bank_constructed = bank_from_snapshot_dir( account_paths, &bank_snapshot, &genesis_config, &RuntimeConfig::default(), None, None, - None, false, - Some(AccountsDbConfig { + AccountsDbConfig { storage_access, ..ACCOUNTS_DB_CONFIG_FOR_TESTING - }), + }, None, Arc::default(), ) .unwrap(); - - bank_constructed.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(bank_constructed, bank); // Verify that the next_append_vec_id tracking is correct @@ -2046,7 +2013,7 @@ mod tests { fn test_bank_from_latest_snapshot_dir() { let genesis_config = GenesisConfig::default(); let bank_snapshots_dir = tempfile::TempDir::new().unwrap(); - let bank = create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 3, 3, true); + let bank = create_snapshot_dirs_for_tests(&genesis_config, &bank_snapshots_dir, 3, true); let account_paths = &bank.rc.accounts.accounts_db.paths; @@ -2057,9 +2024,8 @@ mod tests { account_paths, None, None, - None, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + ACCOUNTS_DB_CONFIG_FOR_TESTING, None, Arc::default(), ) @@ -2080,7 +2046,6 @@ mod tests { &genesis_config, &bank_snapshots_dir, 10, - 5, should_flush_and_hard_link_storages, ); // Keep bank in this scope so that its account_paths tmp dirs 
are not released, and purge_all_bank_snapshots @@ -2100,7 +2065,6 @@ mod tests { &genesis_config, &bank_snapshots_dir, 10, - 5, should_flush_and_hard_link_storages, ); // Keep bank in this scope so that its account_paths tmp dirs are not released, and purge_old_bank_snapshots @@ -2108,18 +2072,14 @@ mod tests { assert_eq!(get_bank_snapshots(&bank_snapshots_dir).len(), 10); - purge_old_bank_snapshots(&bank_snapshots_dir, 3, Some(BankSnapshotKind::Pre)); - assert_eq!(get_bank_snapshots_pre(&bank_snapshots_dir).len(), 3); - - purge_old_bank_snapshots(&bank_snapshots_dir, 2, Some(BankSnapshotKind::Post)); - assert_eq!(get_bank_snapshots_post(&bank_snapshots_dir).len(), 2); + purge_old_bank_snapshots(&bank_snapshots_dir, 2); - assert_eq!(get_bank_snapshots(&bank_snapshots_dir).len(), 5); + assert_eq!(get_bank_snapshots(&bank_snapshots_dir).len(), 2); - purge_old_bank_snapshots(&bank_snapshots_dir, 2, None); + purge_old_bank_snapshots(&bank_snapshots_dir, 2); assert_eq!(get_bank_snapshots(&bank_snapshots_dir).len(), 2); - purge_old_bank_snapshots(&bank_snapshots_dir, 0, None); + purge_old_bank_snapshots(&bank_snapshots_dir, 0); assert_eq!(get_bank_snapshots(&bank_snapshots_dir).len(), 0); } @@ -2134,7 +2094,6 @@ mod tests { &genesis_config, &bank_snapshots_dir, 9, - 6, should_flush_and_hard_link_storages, ); let bank_snapshots_before = get_bank_snapshots(&bank_snapshots_dir); @@ -2168,18 +2127,14 @@ mod tests { &genesis_config, &bank_snapshots_dir, 9, - 6, should_flush_and_hard_link_storages, ); purge_old_bank_snapshots_at_startup(&bank_snapshots_dir); - let bank_snapshots_pre = get_bank_snapshots_pre(&bank_snapshots_dir); - assert!(bank_snapshots_pre.is_empty()); - - let bank_snapshots_post = get_bank_snapshots_post(&bank_snapshots_dir); - assert_eq!(bank_snapshots_post.len(), 1); - assert_eq!(bank_snapshots_post.first().unwrap().slot, 6); + let bank_snapshots = get_bank_snapshots(&bank_snapshots_dir); + assert_eq!(bank_snapshots.len(), 1); + 
assert_eq!(bank_snapshots.first().unwrap().slot, 9); } #[test] @@ -2281,17 +2236,6 @@ mod tests { assert_eq!(result, Ok(())); } - #[test] - fn test_verify_slot_deltas_with_history_bad_slot_history() { - let bank_slot = 444; - let result = verify_slot_deltas_with_history( - &HashSet::default(), - &SlotHistory::default(), // <-- will only have an entry for slot 0 - bank_slot, - ); - assert_eq!(result, Err(VerifySlotDeltasError::BadSlotHistory)); - } - #[test] fn test_verify_slot_deltas_with_history_bad_slot_not_in_history() { let slots_from_slot_deltas = HashSet::from([ @@ -2333,6 +2277,37 @@ mod tests { ); } + #[test] + fn test_verify_slot_history_good() { + let mut slot_history = SlotHistory::default(); + // note: slot history expects slots to be added in numeric order + for slot in [0, 111, 222, 333, 444] { + slot_history.add(slot); + } + + let bank_slot = 444; + let result = verify_slot_history(&slot_history, bank_slot); + assert_eq!(result, Ok(())); + } + + #[test] + fn test_verify_slot_history_bad_invalid_newest_slot() { + let slot_history = SlotHistory::default(); + let bank_slot = 444; + let result = verify_slot_history(&slot_history, bank_slot); + assert_eq!(result, Err(VerifySlotHistoryError::InvalidNewestSlot)); + } + + #[test] + fn test_verify_slot_history_bad_invalid_num_entries() { + let mut slot_history = SlotHistory::default(); + slot_history.bits.truncate(slot_history.bits.len() - 1); + + let bank_slot = 0; + let result = verify_slot_history(&slot_history, bank_slot); + assert_eq!(result, Err(VerifySlotHistoryError::InvalidNumEntries)); + } + #[test] fn test_verify_epoch_stakes_good() { let bank = create_simple_test_bank(100 * LAMPORTS_PER_SOL); @@ -2382,8 +2357,9 @@ mod tests { } } - #[test] - fn test_get_highest_loadable_bank_snapshot() { + #[test_case(SnapshotConfig::new_load_only())] + #[test_case(SnapshotConfig::default())] + fn test_get_highest_loadable_bank_snapshot(snapshot_config: SnapshotConfig) { let bank_snapshots_dir = 
TempDir::new().unwrap(); let snapshot_archives_dir = TempDir::new().unwrap(); @@ -2391,110 +2367,66 @@ mod tests { bank_snapshots_dir: bank_snapshots_dir.as_ref().to_path_buf(), full_snapshot_archives_dir: snapshot_archives_dir.as_ref().to_path_buf(), incremental_snapshot_archives_dir: snapshot_archives_dir.as_ref().to_path_buf(), - ..Default::default() - }; - let load_only_snapshot_config = SnapshotConfig { - bank_snapshots_dir: snapshot_config.bank_snapshots_dir.clone(), - full_snapshot_archives_dir: snapshot_config.full_snapshot_archives_dir.clone(), - incremental_snapshot_archives_dir: snapshot_config - .incremental_snapshot_archives_dir - .clone(), - ..SnapshotConfig::new_load_only() + ..snapshot_config }; let genesis_config = GenesisConfig::default(); let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let mut full_snapshot_archive_info = None; // take some snapshots, and archive them - // note the `+1` at the end; we'll turn it into a PRE afterwards for _ in 0..snapshot_config .maximum_full_snapshot_archives_to_retain .get() - + 1 { let slot = bank.slot() + 1; bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); bank.fill_bank_with_ticks_for_tests(); - full_snapshot_archive_info = Some( - bank_to_full_snapshot_archive_with( - &snapshot_config.bank_snapshots_dir, - &bank, - snapshot_config.snapshot_version, - &snapshot_config.full_snapshot_archives_dir, - &snapshot_config.incremental_snapshot_archives_dir, - snapshot_config.archive_format, - false, - ) - .unwrap(), - ); + bank_to_full_snapshot_archive_with( + &snapshot_config.bank_snapshots_dir, + &bank, + snapshot_config.snapshot_version, + &snapshot_config.full_snapshot_archives_dir, + &snapshot_config.incremental_snapshot_archives_dir, + snapshot_config.archive_format, + false, + ) + .unwrap(); } - - // As a hack, to make a PRE bank snapshot, just rename the last POST one. 
- let slot = bank.slot(); - let bank_snapshot_dir = get_bank_snapshot_dir(&bank_snapshots_dir, slot); - let post = bank_snapshot_dir.join(get_snapshot_file_name(slot)); - let pre = post.with_extension(BANK_SNAPSHOT_PRE_FILENAME_EXTENSION); - fs::rename(post, pre).unwrap(); - - // ...and we also need to delete the last snapshot archive - fs::remove_file(full_snapshot_archive_info.unwrap().path()).unwrap(); - - let highest_full_snapshot_archive = - get_highest_full_snapshot_archive_info(&snapshot_archives_dir).unwrap(); - let highest_bank_snapshot_post = - get_highest_bank_snapshot_post(&bank_snapshots_dir).unwrap(); - let highest_bank_snapshot_pre = get_highest_bank_snapshot_pre(&bank_snapshots_dir).unwrap(); - - // we want a bank snapshot PRE with the highest slot to ensure get_highest_loadable() - // correctly skips bank snapshots PRE - assert!(highest_bank_snapshot_pre.slot > highest_bank_snapshot_post.slot); + let highest_bank_snapshot = get_highest_bank_snapshot(&bank_snapshots_dir).unwrap(); // 1. call get_highest_loadable() but bad snapshot dir, so returns None assert!(get_highest_loadable_bank_snapshot(&SnapshotConfig::default()).is_none()); - // 2. the 'storages flushed' file hasn't been written yet, so get_highest_loadable() should return NONE + // 2. the bank snapshot has not been marked as loadable, so get_highest_loadable() should return NONE assert!(get_highest_loadable_bank_snapshot(&snapshot_config).is_none()); - // 3. write 'storages flushed' file, get_highest_loadable(), should return highest_bank_snapshot_post_slot - snapshot_utils::write_storages_flushed_file(&highest_bank_snapshot_post.snapshot_dir) + // 3. 
Mark the bank snapshot as loadable, get_highest_loadable() should return highest_bank_snapshot_slot + snapshot_utils::mark_bank_snapshot_as_loadable(&highest_bank_snapshot.snapshot_dir) .unwrap(); let bank_snapshot = get_highest_loadable_bank_snapshot(&snapshot_config).unwrap(); - assert_eq!(bank_snapshot, highest_bank_snapshot_post); + assert_eq!(bank_snapshot, highest_bank_snapshot); - // 4. delete highest full snapshot archive, get_highest_loadable() should return NONE - fs::remove_file(highest_full_snapshot_archive.path()).unwrap(); + // 4. delete highest bank snapshot, get_highest_loadable() should return NONE + fs::remove_dir_all(&highest_bank_snapshot.snapshot_dir).unwrap(); assert!(get_highest_loadable_bank_snapshot(&snapshot_config).is_none()); - // 5. get_highest_loadable(), but with a load-only snapshot config, should return Some() - let bank_snapshot = get_highest_loadable_bank_snapshot(&load_only_snapshot_config).unwrap(); - assert_eq!(bank_snapshot, highest_bank_snapshot_post); - - // 6. delete highest bank snapshot, get_highest_loadable() should return NONE - fs::remove_dir_all(&highest_bank_snapshot_post.snapshot_dir).unwrap(); - assert!(get_highest_loadable_bank_snapshot(&snapshot_config).is_none()); - - // 7. write 'storages flushed' file, get_highest_loadable() should return Some() again, with slot-1 - snapshot_utils::write_storages_flushed_file(get_bank_snapshot_dir( + // 5. Mark the bank snapshot as loadable, get_highest_loadable() should return Some() again, with slot-1 + snapshot_utils::mark_bank_snapshot_as_loadable(get_bank_snapshot_dir( &snapshot_config.bank_snapshots_dir, - highest_bank_snapshot_post.slot - 1, + highest_bank_snapshot.slot - 1, )) .unwrap(); let bank_snapshot = get_highest_loadable_bank_snapshot(&snapshot_config).unwrap(); - assert_eq!(bank_snapshot.slot, highest_bank_snapshot_post.slot - 1); + assert_eq!(bank_snapshot.slot, highest_bank_snapshot.slot - 1); - // 8. 
delete the full snapshot slot file, get_highest_loadable() should return NONE + // 6. delete the full snapshot slot file, get_highest_loadable() should return return Some() again, with slot-1 fs::remove_file( bank_snapshot .snapshot_dir .join(SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME), ) .unwrap(); - assert!(get_highest_loadable_bank_snapshot(&snapshot_config).is_none()); - - // 9. however, a load-only snapshot config should return Some() again - let bank_snapshot2 = - get_highest_loadable_bank_snapshot(&load_only_snapshot_config).unwrap(); + let bank_snapshot2 = get_highest_loadable_bank_snapshot(&snapshot_config).unwrap(); assert_eq!(bank_snapshot2, bank_snapshot); } } diff --git a/runtime/src/snapshot_controller.rs b/runtime/src/snapshot_controller.rs index 0245d998c0a110..efe903c2545102 100644 --- a/runtime/src/snapshot_controller.rs +++ b/runtime/src/snapshot_controller.rs @@ -107,24 +107,16 @@ impl SnapshotController { is_root_bank_squashed = bank_slot == root; let mut snapshot_time = Measure::start("squash::snapshot_time"); - if bank.has_initial_accounts_hash_verification_completed() { - // Save off the status cache because these may get pruned if another - // `set_root()` is called before the snapshots package can be generated - let status_cache_slot_deltas = - bank.status_cache.read().unwrap().root_slot_deltas(); - if let Err(e) = self.abs_request_sender.send(SnapshotRequest { - snapshot_root_bank: Arc::clone(bank), - status_cache_slot_deltas, - request_kind, - enqueued: Instant::now(), - }) { - warn!( - "Error sending snapshot request for bank: {}, err: {:?}", - bank_slot, e - ); - } - } else { - info!("Not sending snapshot request for bank: {}, startup verification is incomplete", bank_slot); + // Save off the status cache because these may get pruned if another + // `set_root()` is called before the snapshots package can be generated + let status_cache_slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas(); + if let Err(e) = 
self.abs_request_sender.send(SnapshotRequest { + snapshot_root_bank: Arc::clone(bank), + status_cache_slot_deltas, + request_kind, + enqueued: Instant::now(), + }) { + warn!("Error sending snapshot request for bank: {bank_slot}, err: {e:?}"); } snapshot_time.stop(); total_snapshot_ms += snapshot_time.as_ms(); diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index b63ed7005ad3af..e7a6725b53d9cd 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -46,7 +46,12 @@ impl<'a> SnapshotMinimizer<'a> { /// /// This function will modify accounts_db by removing accounts not needed to replay [starting_slot, ending_slot], /// and update the bank's capitalization. - pub fn minimize(bank: &'a Bank, starting_slot: Slot, transaction_account_set: DashSet) { + pub fn minimize( + bank: &'a Bank, + starting_slot: Slot, + transaction_account_set: DashSet, + should_recalculate_accounts_lt_hash: bool, + ) { let minimizer = SnapshotMinimizer { bank, starting_slot, @@ -71,14 +76,16 @@ impl<'a> SnapshotMinimizer<'a> { .bank .set_capitalization_for_tests(minimizer.bank.calculate_capitalization_for_tests()); - // Since the account state has changed, the accounts lt hash must be recalculated - let new_accounts_lt_hash = minimizer - .accounts_db() - .calculate_accounts_lt_hash_at_startup_from_index( - &minimizer.bank.ancestors, - minimizer.bank.slot(), - ); - bank.set_accounts_lt_hash_for_snapshot_minimizer(new_accounts_lt_hash); + if should_recalculate_accounts_lt_hash { + // Since the account state has changed, the accounts lt hash must be recalculated + let new_accounts_lt_hash = minimizer + .accounts_db() + .calculate_accounts_lt_hash_at_startup_from_index( + &minimizer.bank.ancestors, + minimizer.bank.slot(), + ); + bank.set_accounts_lt_hash_for_snapshot_minimizer(new_accounts_lt_hash); + } } /// Helper function to measure time and number of accounts added @@ -92,7 +99,8 @@ impl<'a> SnapshotMinimizer<'a> { let 
added_accounts = total_accounts_len - initial_accounts_len; info!( - "Added {added_accounts} {name} for total of {total_accounts_len} accounts. get {measure}" + "Added {added_accounts} {name} for total of {total_accounts_len} accounts. get \ + {measure}" ); } @@ -310,11 +318,8 @@ impl<'a> SnapshotMinimizer<'a> { let remove_pubkeys = purge_pubkeys_collect.into_inner().unwrap(); let total_bytes = total_bytes_collect.load(Ordering::Relaxed); - let purge_pubkeys: Vec<_> = remove_pubkeys - .into_iter() - .map(|pubkey| (*pubkey, slot)) - .collect(); - let _ = self.accounts_db().purge_keys_exact(purge_pubkeys.iter()); + let purge_pubkeys = remove_pubkeys.into_iter().map(|pubkey| (*pubkey, slot)); + let _ = self.accounts_db().purge_keys_exact(purge_pubkeys); let mut shrink_in_progress = None; if total_bytes > 0 { @@ -376,7 +381,7 @@ mod tests { }, dashmap::DashSet, solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, - solana_accounts_db::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, + solana_accounts_db::accounts_db::{AccountsDbConfig, ACCOUNTS_DB_CONFIG_FOR_TESTING}, solana_genesis_config::create_genesis_config, solana_loader_v3_interface::state::UpgradeableLoaderState, solana_pubkey::Pubkey, @@ -385,6 +390,7 @@ mod tests { solana_stake_interface as stake, std::sync::Arc, tempfile::TempDir, + test_case::test_case, }; #[test] @@ -551,7 +557,7 @@ mod tests { current_slot += 1; for (index, pubkey) in pubkeys.iter().enumerate() { - accounts.store_for_tests(current_slot, &[(pubkey, &account)]); + accounts.store_for_tests((current_slot, [(pubkey, &account)].as_slice())); if current_slot % 2 == 0 && index % 100 == 0 { minimized_account_set.insert(*pubkey); @@ -587,10 +593,11 @@ mod tests { ); // snapshot slot is untouched, so still has all 300 accounts } - /// Ensure that minimization recalculates the accounts lt hash correctly - /// so the minimized snapshot is loadable. 
- #[test] - fn test_minimize_and_accounts_lt_hash() { + /// Ensure that minimized snapshots are loadable with and without + /// recalculating the accounts lt hash. + #[test_case(false)] + #[test_case(true)] + fn test_minimize_and_recalculate_accounts_lt_hash(should_recalculate_accounts_lt_hash: bool) { let genesis_config_info = genesis_utils::create_genesis_config(123_456_789_000_000_000); let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config_info.genesis_config); @@ -622,7 +629,12 @@ mod tests { bank.force_flush_accounts_cache(); // do the minimization - SnapshotMinimizer::minimize(&bank, bank.slot(), DashSet::from_iter([pubkey_to_keep])); + SnapshotMinimizer::minimize( + &bank, + bank.slot(), + DashSet::from_iter([pubkey_to_keep]), + should_recalculate_accounts_lt_hash, + ); // take a snapshot of the minimized bank, then load it let snapshot_config = SnapshotConfig::default(); @@ -638,7 +650,12 @@ mod tests { ) .unwrap(); let (_accounts_tempdir, accounts_dir) = snapshot_utils::create_tmp_accounts_dir_for_tests(); - let (roundtrip_bank, _) = snapshot_bank_utils::bank_from_snapshot_archives( + let accounts_db_config = AccountsDbConfig { + // must skip accounts verification if we did not recalculate the accounts lt hash + skip_initial_hash_calc: !should_recalculate_accounts_lt_hash, + ..ACCOUNTS_DB_CONFIG_FOR_TESTING + }; + let roundtrip_bank = snapshot_bank_utils::bank_from_snapshot_archives( &[accounts_dir], &bank_snapshots_dir, &snapshot, @@ -647,18 +664,15 @@ mod tests { &RuntimeConfig::default(), None, None, - None, false, false, false, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), + accounts_db_config, None, Arc::default(), ) .unwrap(); - // Wait for the startup verification to complete. If we don't panic, then we're good! 
- roundtrip_bank.wait_for_initial_accounts_hash_verification_completed_for_tests(); assert_eq!(roundtrip_bank, *bank); } } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 0ad669ac979bfb..7a66cde0da0d4e 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1,3 +1,5 @@ +#[cfg(feature = "dev-context-only-utils")] +use solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs; use { crate::{ bank::{BankFieldsToDeserialize, BankFieldsToSerialize, BankHashStats, BankSlotDelta}, @@ -9,7 +11,6 @@ use { FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfo, SnapshotArchiveInfoGetter, }, - snapshot_bank_utils, snapshot_config::SnapshotConfig, snapshot_hash::SnapshotHash, snapshot_package::{SnapshotKind, SnapshotPackage}, @@ -20,10 +21,11 @@ use { crossbeam_channel::{Receiver, Sender}, log::*, regex::Regex, + semver::Version, solana_accounts_db::{ account_storage::{AccountStorageMap, AccountStoragesOrderer}, account_storage_reader::AccountStorageReader, - accounts_db::{AccountStorageEntry, AtomicAccountsFileId}, + accounts_db::{AccountStorageEntry, AccountsDbConfig, AtomicAccountsFileId}, accounts_file::{AccountsFile, AccountsFileError, StorageAccess}, hardened_unpack::{self, UnpackError}, utils::{move_and_async_delete_path, ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR}, @@ -49,11 +51,6 @@ use { tempfile::TempDir, thiserror::Error, }; -#[cfg(feature = "dev-context-only-utils")] -use { - hardened_unpack::UnpackedAppendVecMap, - solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs, -}; mod archive_format; mod snapshot_interval; @@ -62,16 +59,23 @@ pub use {archive_format::*, snapshot_interval::SnapshotInterval}; pub const SNAPSHOT_STATUS_CACHE_FILENAME: &str = "status_cache"; pub const SNAPSHOT_VERSION_FILENAME: &str = "version"; +pub const SNAPSHOT_FASTBOOT_VERSION_FILENAME: &str = "fastboot_version"; +/// No longer checked in version v3.1. 
Can be removed in v3.2 pub const SNAPSHOT_STATE_COMPLETE_FILENAME: &str = "state_complete"; pub const SNAPSHOT_STORAGES_FLUSHED_FILENAME: &str = "storages_flushed"; pub const SNAPSHOT_ACCOUNTS_HARDLINKS: &str = "accounts_hardlinks"; pub const SNAPSHOT_ARCHIVE_DOWNLOAD_DIR: &str = "remote"; +/// No longer checked in version v3.1. Can be removed in v3.2 pub const SNAPSHOT_FULL_SNAPSHOT_SLOT_FILENAME: &str = "full_snapshot_slot"; +/// When a snapshot is taken of a bank, the state is serialized under this directory. +/// Specifically in `BANK_SNAPSHOTS_DIR/SLOT/`. +/// This is also where the bank state is located in the snapshot archive. +pub const BANK_SNAPSHOTS_DIR: &str = "snapshots"; pub const MAX_SNAPSHOT_DATA_FILE_SIZE: u64 = 32 * 1024 * 1024 * 1024; // 32 GiB const MAX_SNAPSHOT_VERSION_FILE_SIZE: u64 = 8; // byte +const SNAPSHOT_FASTBOOT_VERSION: Version = Version::new(1, 0, 0); const VERSION_STRING_V1_2_0: &str = "1.2.0"; pub const TMP_SNAPSHOT_ARCHIVE_PREFIX: &str = "tmp-snapshot-archive-"; -pub const BANK_SNAPSHOT_PRE_FILENAME_EXTENSION: &str = "pre"; pub const DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: NonZeroU64 = NonZeroU64::new(100_000).unwrap(); pub const DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: NonZeroU64 = @@ -84,6 +88,9 @@ pub const FULL_SNAPSHOT_ARCHIVE_FILENAME_REGEX: &str = r"^snapshot-(?P[[:digit:]]+)-(?P[[:alnum:]]+)\.(?Ptar\.zst|tar\.lz4)$"; pub const INCREMENTAL_SNAPSHOT_ARCHIVE_FILENAME_REGEX: &str = r"^incremental-snapshot-(?P[[:digit:]]+)-(?P[[:digit:]]+)-(?P[[:alnum:]]+)\.(?Ptar\.zst|tar\.lz4)$"; +// Allows scheduling a large number of reads such that temporary disk access delays +// shouldn't block decompression (unless read bandwidth is saturated). 
+const MAX_SNAPSHOT_READER_BUF_SIZE: u64 = 128 * 1024 * 1024; // Balance large and small files order in snapshot tar with bias towards small (4 small + 1 large), // such that during unpacking large writes are mixed with file metadata operations // and towards the end of archive (sizes equalize) writes are >256KiB / file. @@ -141,8 +148,6 @@ impl SnapshotVersion { pub struct BankSnapshotInfo { /// Slot of the bank pub slot: Slot, - /// Snapshot kind - pub snapshot_kind: BankSnapshotKind, /// Path to the bank snapshot directory pub snapshot_dir: PathBuf, /// Snapshot version @@ -181,10 +186,15 @@ impl BankSnapshotInfo { // I/O errors. // There is a time window from the slot directory being created, and the content being completely - // filled. Check the completion to avoid using a highest found slot directory with missing content. - if !is_bank_snapshot_complete(&bank_snapshot_dir) { - return Err(SnapshotNewFromDirError::IncompleteDir(bank_snapshot_dir)); - } + // filled. Check the version file as it is the last file written to avoid using a highest + // found slot directory with missing content + let version_path = bank_snapshot_dir.join(SNAPSHOT_VERSION_FILENAME); + let version_str = snapshot_version_from_file(&version_path).map_err(|err| { + SnapshotNewFromDirError::IncompleteDir(err, bank_snapshot_dir.clone()) + })?; + + let snapshot_version = SnapshotVersion::from_str(version_str.as_str()) + .or(Err(SnapshotNewFromDirError::InvalidVersion(version_str)))?; let status_cache_file = bank_snapshot_dir.join(SNAPSHOT_STATUS_CACHE_FILENAME); if !status_cache_file.is_file() { @@ -193,30 +203,8 @@ impl BankSnapshotInfo { )); } - let version_path = bank_snapshot_dir.join(SNAPSHOT_VERSION_FILENAME); - let version_str = snapshot_version_from_file(&version_path).or(Err( - SnapshotNewFromDirError::MissingVersionFile(version_path), - ))?; - let snapshot_version = SnapshotVersion::from_str(version_str.as_str()) - .or(Err(SnapshotNewFromDirError::InvalidVersion(version_str)))?; - 
- let bank_snapshot_post_path = bank_snapshot_dir.join(get_snapshot_file_name(slot)); - let bank_snapshot_pre_path = - bank_snapshot_post_path.with_extension(BANK_SNAPSHOT_PRE_FILENAME_EXTENSION); - - // NOTE: It is important that checking for "Pre" happens before "Post. - // - // Consider the scenario where AccountsHashVerifier is actively processing an - // AccountsPackage for a snapshot/slot; if AHV is in the middle of reserializing the - // bank snapshot file (writing the new "Post" file), and then the process dies, - // there will be an incomplete "Post" file on disk. We do not want only the existence of - // this "Post" file to be sufficient for deciding the snapshot kind as "Post". More so, - // "Post" *requires* the *absence* of a "Pre" file. - let snapshot_kind = if bank_snapshot_pre_path.is_file() { - BankSnapshotKind::Pre - } else if bank_snapshot_post_path.is_file() { - BankSnapshotKind::Post - } else { + let bank_snapshot_path = bank_snapshot_dir.join(get_snapshot_file_name(slot)); + if !bank_snapshot_path.is_file() { return Err(SnapshotNewFromDirError::MissingSnapshotFile( bank_snapshot_dir, )); @@ -224,41 +212,16 @@ impl BankSnapshotInfo { Ok(BankSnapshotInfo { slot, - snapshot_kind, snapshot_dir: bank_snapshot_dir, snapshot_version, }) } pub fn snapshot_path(&self) -> PathBuf { - let mut bank_snapshot_path = self.snapshot_dir.join(get_snapshot_file_name(self.slot)); - - let ext = match self.snapshot_kind { - BankSnapshotKind::Pre => BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, - BankSnapshotKind::Post => "", - }; - bank_snapshot_path.set_extension(ext); - - bank_snapshot_path + self.snapshot_dir.join(get_snapshot_file_name(self.slot)) } } -/// Bank snapshots traditionally had their accounts hash calculated prior to serialization. Since -/// the hash calculation takes a long time, an optimization has been put in to offload the accounts -/// hash calculation. 
The bank serialization format has not changed, so we need another way to -/// identify if a bank snapshot contains the calculated accounts hash or not. -/// -/// When a bank snapshot is first taken, it does not have the calculated accounts hash. It is said -/// that this bank snapshot is "pre" accounts hash. Later, when the accounts hash is calculated, -/// the bank snapshot is re-serialized, and is now "post" accounts hash. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum BankSnapshotKind { - /// This bank snapshot has *not* yet had its accounts hash calculated - Pre, - /// This bank snapshot *has* had its accounts hash calculated - Post, -} - /// When constructing a bank a snapshot, traditionally the snapshot was from a snapshot archive. Now, /// the snapshot can be from a snapshot directory, or from a snapshot archive. This is the flag to /// indicate which. @@ -359,7 +322,10 @@ pub enum SnapshotError { #[error("could not parse snapshot archive's file name '{0}'")] ParseSnapshotArchiveFileNameError(String), - #[error("snapshots are incompatible: full snapshot slot ({0}) and incremental snapshot base slot ({1}) do not match")] + #[error( + "snapshots are incompatible: full snapshot slot ({0}) and incremental snapshot base slot \ + ({1}) do not match" + )] MismatchedBaseSlot(Slot, Slot), #[error("no snapshot archives to load from '{0}'")] @@ -405,6 +371,15 @@ pub enum SnapshotError { RebuildStorages(String), } +#[derive(Error, Debug)] +pub enum SnapshotFastbootError { + #[error("invalid version string for fastboot '{0}'")] + InvalidVersion(String), + + #[error("incompatible fastboot version '{0}'")] + IncompatibleVersion(Version), +} + #[derive(Error, Debug)] pub enum SnapshotNewFromDirError { #[error("invalid bank snapshot directory '{0}'")] @@ -419,8 +394,8 @@ pub enum SnapshotNewFromDirError { #[error("invalid snapshot version '{0}'")] InvalidVersion(String), - #[error("snapshot directory incomplete '{0}'")] - IncompleteDir(PathBuf), + #[error("snapshot 
directory incomplete '{1}': {0}")] + IncompleteDir(#[source] IoError, PathBuf), #[error("missing snapshot file '{0}'")] MissingSnapshotFile(PathBuf), @@ -449,8 +424,18 @@ pub enum VerifySlotDeltasError { #[error("slot {0} was in history but missing from slot deltas")] SlotNotFoundInDeltas(Slot), - #[error("slot history is bad and cannot be used to verify slot deltas")] - BadSlotHistory, + #[error("snapshot slot history is invalid: {0}")] + VerifySlotHistory(#[from] VerifySlotHistoryError), +} + +/// Errors that can happen in `verify_slot_history()` +#[derive(Error, Debug, PartialEq, Eq)] +pub enum VerifySlotHistoryError { + #[error("newest slot does not match snapshot")] + InvalidNewestSlot, + + #[error("invalid number of entries")] + InvalidNumEntries, } /// Errors that can happen in `verify_epoch_stakes()` @@ -475,9 +460,6 @@ pub enum AddBankSnapshotError { #[error("failed to flush storage '{1}': {0}")] FlushStorage(#[source] AccountsFileError, PathBuf), - #[error("failed to mark snapshot storages as 'flushed': {0}")] - MarkStoragesFlushed(#[source] IoError), - #[error("failed to hard link storages: {0}")] HardLinkStorages(#[source] HardLinkStoragesToSnapshotError), @@ -490,8 +472,8 @@ pub enum AddBankSnapshotError { #[error("failed to write snapshot version file '{1}': {0}")] WriteSnapshotVersionFile(#[source] IoError, PathBuf), - #[error("failed to mark snapshot as 'complete': {0}")] - MarkSnapshotComplete(#[source] IoError), + #[error("failed to mark snapshot as 'loadable': {0}")] + MarkSnapshotLoadable(#[source] IoError), } /// Errors that can happen in `archive_snapshot_package()` @@ -673,24 +655,8 @@ pub fn purge_incomplete_bank_snapshots(bank_snapshots_dir: impl AsRef) { /// Is the bank snapshot complete? 
fn is_bank_snapshot_complete(bank_snapshot_dir: impl AsRef) -> bool { - let state_complete_path = bank_snapshot_dir - .as_ref() - .join(SNAPSHOT_STATE_COMPLETE_FILENAME); - state_complete_path.is_file() -} - -/// Marks the bank snapshot as complete -fn write_snapshot_state_complete_file(bank_snapshot_dir: impl AsRef) -> io::Result<()> { - let state_complete_path = bank_snapshot_dir - .as_ref() - .join(SNAPSHOT_STATE_COMPLETE_FILENAME); - fs::File::create(&state_complete_path).map_err(|err| { - IoError::other(format!( - "failed to create file '{}': {err}", - state_complete_path.display(), - )) - })?; - Ok(()) + let version_path = bank_snapshot_dir.as_ref().join(SNAPSHOT_VERSION_FILENAME); + version_path.is_file() } /// Writes the full snapshot slot file into the bank snapshot dir @@ -736,8 +702,22 @@ pub fn read_full_snapshot_slot_file(bank_snapshot_dir: impl AsRef) -> io:: Ok(slot) } -/// Writes the 'snapshot storages have been flushed' file to the bank snapshot dir -pub fn write_storages_flushed_file(bank_snapshot_dir: impl AsRef) -> io::Result<()> { +/// Writes files that indicate the bank snapshot is loadable by fastboot +pub fn mark_bank_snapshot_as_loadable(bank_snapshot_dir: impl AsRef) -> io::Result<()> { + // Mark this directory complete. Used in older versions to check if the snapshot is complete + // Never read in v3.1, can be removed in v3.2 + let state_complete_path = bank_snapshot_dir + .as_ref() + .join(SNAPSHOT_STATE_COMPLETE_FILENAME); + fs::File::create(&state_complete_path).map_err(|err| { + IoError::other(format!( + "failed to create file '{}': {err}", + state_complete_path.display(), + )) + })?; + + // Write the storages flushed file. 
Used in older versions to check if the snapshot is complete + // Read in v3.1 for backwards compatibility, can be removed in v3.2 let flushed_storages_path = bank_snapshot_dir .as_ref() .join(SNAPSHOT_STORAGES_FLUSHED_FILENAME); @@ -747,46 +727,84 @@ pub fn write_storages_flushed_file(bank_snapshot_dir: impl AsRef) -> io::R flushed_storages_path.display(), )) })?; + + let snapshot_fastboot_version_path = bank_snapshot_dir + .as_ref() + .join(SNAPSHOT_FASTBOOT_VERSION_FILENAME); + fs::write( + &snapshot_fastboot_version_path, + SNAPSHOT_FASTBOOT_VERSION.to_string(), + ) + .map_err(|err| { + IoError::other(format!( + "failed to write fastboot version file '{}': {err}", + snapshot_fastboot_version_path.display(), + )) + })?; Ok(()) } -/// Were the snapshot storages flushed in this bank snapshot? -fn are_bank_snapshot_storages_flushed(bank_snapshot_dir: impl AsRef) -> bool { +/// Is this bank snapshot loadable? +fn is_bank_snapshot_loadable( + bank_snapshot_dir: impl AsRef, +) -> std::result::Result { + // Legacy storages flushed file + // Read in v3.1 for backwards compatibility, can be removed in v3.2 let flushed_storages = bank_snapshot_dir .as_ref() .join(SNAPSHOT_STORAGES_FLUSHED_FILENAME); - flushed_storages.is_file() + if flushed_storages.is_file() { + return Ok(true); + } + + let snapshot_fastboot_version_path = bank_snapshot_dir + .as_ref() + .join(SNAPSHOT_FASTBOOT_VERSION_FILENAME); + + if let Ok(version_string) = fs::read_to_string(&snapshot_fastboot_version_path) { + if let Ok(version) = Version::from_str(version_string.trim()) { + is_snapshot_fastboot_compatible(&version) + } else { + Err(SnapshotFastbootError::InvalidVersion(version_string)) + } + } else { + // No fastboot version file, so this is not a fastbootable + Ok(false) + } +} + +/// Is the fastboot snapshot version compatible? 
+fn is_snapshot_fastboot_compatible( + version: &Version, +) -> std::result::Result { + if version.major <= SNAPSHOT_FASTBOOT_VERSION.major { + Ok(true) + } else { + Err(SnapshotFastbootError::IncompatibleVersion(version.clone())) + } } /// Gets the highest, loadable, bank snapshot /// /// The highest bank snapshot is the one with the highest slot. -/// To be loadable, the bank snapshot must be a BankSnapshotKind::Post. -/// And if we're generating snapshots (e.g. running a normal validator), then -/// the full snapshot file's slot must match the highest full snapshot archive's. -/// Lastly, the account storages must have been flushed to be loadable. pub fn get_highest_loadable_bank_snapshot( snapshot_config: &SnapshotConfig, ) -> Option { - let highest_bank_snapshot = - get_highest_bank_snapshot_post(&snapshot_config.bank_snapshots_dir)?; + let highest_bank_snapshot = get_highest_bank_snapshot(&snapshot_config.bank_snapshots_dir)?; - // If we're *not* generating snapshots, e.g. running ledger-tool, then we *can* load - // this bank snapshot, and we do not need to check for anything else. - if !snapshot_config.should_generate_snapshots() { - return Some(highest_bank_snapshot); - } + let is_bank_snapshot_loadable = is_bank_snapshot_loadable(&highest_bank_snapshot.snapshot_dir); - // Otherwise, the bank snapshot's full snapshot slot *must* be the same as - // the highest full snapshot archive's slot. 
- let highest_full_snapshot_archive_slot = - get_highest_full_snapshot_archive_slot(&snapshot_config.full_snapshot_archives_dir)?; - let full_snapshot_file_slot = - read_full_snapshot_slot_file(&highest_bank_snapshot.snapshot_dir).ok()?; - let are_storages_flushed = - are_bank_snapshot_storages_flushed(&highest_bank_snapshot.snapshot_dir); - (are_storages_flushed && (full_snapshot_file_slot == highest_full_snapshot_archive_slot)) - .then_some(highest_bank_snapshot) + match is_bank_snapshot_loadable { + Ok(true) => Some(highest_bank_snapshot), + Ok(false) => None, + Err(err) => { + warn!( + "Bank snapshot is not loadable '{}': {err}", + highest_bank_snapshot.snapshot_dir.display() + ); + None + } + } } /// If the validator halts in the middle of `archive_snapshot_package()`, the temporary staging @@ -855,7 +873,8 @@ pub fn serialize_and_archive_snapshot_package( write_full_snapshot_slot_file(&bank_snapshot_info.snapshot_dir, full_snapshot_archive_slot) .map_err(|err| { IoError::other(format!( - "failed to serialize snapshot slot {snapshot_slot}, block height {block_height}, kind {snapshot_kind:?}: {err}", + "failed to serialize snapshot slot {snapshot_slot}, block height {block_height}, \ + kind {snapshot_kind:?}: {err}", )) })?; @@ -928,28 +947,6 @@ fn serialize_snapshot( bank_snapshot_path.display(), ); - let (flush_storages_us, hard_link_storages_us) = if should_flush_and_hard_link_storages { - let flush_measure = Measure::start(""); - for storage in snapshot_storages { - storage.flush().map_err(|err| { - AddBankSnapshotError::FlushStorage(err, storage.path().to_path_buf()) - })?; - } - let flush_us = flush_measure.end_as_us(); - let (_, hard_link_us) = measure_us!(hard_link_storages_to_snapshot( - &bank_snapshot_dir, - slot, - snapshot_storages - ) - .map_err(AddBankSnapshotError::HardLinkStorages)?); - write_storages_flushed_file(&bank_snapshot_dir) - .map_err(AddBankSnapshotError::MarkStoragesFlushed)?; - Some((flush_us, hard_link_us)) - } else { - None - } - 
.unzip(); - let bank_snapshot_serializer = move |stream: &mut BufWriter| -> Result<()> { let versioned_epoch_stakes = mem::take(&mut bank_fields.versioned_epoch_stakes); let extra_fields = ExtraFieldsToSerialize { @@ -977,7 +974,7 @@ fn serialize_snapshot( let status_cache_path = bank_snapshot_dir.join(SNAPSHOT_STATUS_CACHE_FILENAME); let (status_cache_consumed_size, status_cache_serialize_us) = measure_us!( - snapshot_bank_utils::serialize_status_cache(slot_deltas, &status_cache_path) + serde_snapshot::serialize_status_cache(slot_deltas, &status_cache_path) .map_err(|err| AddBankSnapshotError::SerializeStatusCache(Box::new(err)))? ); @@ -988,12 +985,30 @@ fn serialize_snapshot( ) .map_err(|err| AddBankSnapshotError::WriteSnapshotVersionFile(err, version_path))?); - // Mark this directory complete so it can be used. Check this flag first before selecting for deserialization. - let (_, write_state_complete_file_us) = measure_us!({ - write_snapshot_state_complete_file(&bank_snapshot_dir) - .map_err(AddBankSnapshotError::MarkSnapshotComplete)? 
- }); + let (flush_storages_us, hard_link_storages_us) = if should_flush_and_hard_link_storages { + let flush_measure = Measure::start(""); + for storage in snapshot_storages { + storage.flush().map_err(|err| { + AddBankSnapshotError::FlushStorage(err, storage.path().to_path_buf()) + })?; + } + let flush_us = flush_measure.end_as_us(); + let (_, hard_link_us) = measure_us!(hard_link_storages_to_snapshot( + &bank_snapshot_dir, + slot, + snapshot_storages + ) + .map_err(AddBankSnapshotError::HardLinkStorages)?); + + // Now that the storages are flushed and hard linked, mark the snapshot as loadable + mark_bank_snapshot_as_loadable(&bank_snapshot_dir) + .map_err(AddBankSnapshotError::MarkSnapshotLoadable)?; + Some((flush_us, hard_link_us)) + } else { + None + } + .unzip(); measure_everything.stop(); // Monitor sizes because they're capped to MAX_SNAPSHOT_DATA_FILE_SIZE @@ -1007,11 +1022,6 @@ fn serialize_snapshot( ("bank_serialize_us", bank_serialize.as_us(), i64), ("status_cache_serialize_us", status_cache_serialize_us, i64), ("write_version_file_us", write_version_file_us, i64), - ( - "write_state_complete_file_us", - write_state_complete_file_us, - i64 - ), ("total_us", measure_everything.as_us(), i64), ); @@ -1024,7 +1034,6 @@ fn serialize_snapshot( Ok(BankSnapshotInfo { slot, - snapshot_kind: BankSnapshotKind::Pre, snapshot_dir: bank_snapshot_dir, snapshot_version, }) @@ -1044,7 +1053,6 @@ fn archive_snapshot( archive_format: ArchiveFormat, ) -> Result { use ArchiveSnapshotPackageError as E; - const SNAPSHOTS_DIR: &str = "snapshots"; const ACCOUNTS_DIR: &str = "accounts"; info!("Generating snapshot archive for slot {snapshot_slot}, kind: {snapshot_kind:?}"); @@ -1059,10 +1067,10 @@ fn archive_snapshot( // Create the staging directories let staging_dir_prefix = TMP_SNAPSHOT_ARCHIVE_PREFIX; let staging_dir = tempfile::Builder::new() - .prefix(&format!("{}{}-", staging_dir_prefix, snapshot_slot)) + .prefix(&format!("{staging_dir_prefix}{snapshot_slot}-")) 
.tempdir_in(tar_dir) .map_err(|err| E::CreateStagingDir(err, tar_dir.to_path_buf()))?; - let staging_snapshots_dir = staging_dir.path().join(SNAPSHOTS_DIR); + let staging_snapshots_dir = staging_dir.path().join(BANK_SNAPSHOTS_DIR); let slot_str = snapshot_slot.to_string(); let staging_snapshot_dir = staging_snapshots_dir.join(&slot_str); @@ -1122,7 +1130,7 @@ fn archive_snapshot( .append_path_with_name(&staging_version_file, SNAPSHOT_VERSION_FILENAME) .map_err(E::ArchiveVersionFile)?; archive - .append_dir_all(SNAPSHOTS_DIR, &staging_snapshots_dir) + .append_dir_all(BANK_SNAPSHOTS_DIR, &staging_snapshots_dir) .map_err(E::ArchiveSnapshotsDir)?; let storages_orderer = AccountStoragesOrderer::with_small_to_large_ratio( @@ -1224,7 +1232,7 @@ pub fn get_bank_snapshots(bank_snapshots_dir: impl AsRef) -> Vec paths .filter_map(|entry| { // check if this entry is a directory and only a Slot - // bank snapshots are bank_snapshots_dir/slot/slot(BANK_SNAPSHOT_PRE_FILENAME_EXTENSION) + // bank snapshots are bank_snapshots_dir/slot/slot entry .ok() .filter(|entry| entry.path().is_dir()) @@ -1248,42 +1256,6 @@ pub fn get_bank_snapshots(bank_snapshots_dir: impl AsRef) -> Vec) -> Vec { - let mut bank_snapshots = get_bank_snapshots(bank_snapshots_dir); - bank_snapshots.retain(|bank_snapshot| bank_snapshot.snapshot_kind == BankSnapshotKind::Pre); - bank_snapshots -} - -/// Get the bank snapshots in a directory -/// -/// This function retains only the bank snapshots of kind BankSnapshotKind::Post -pub fn get_bank_snapshots_post(bank_snapshots_dir: impl AsRef) -> Vec { - let mut bank_snapshots = get_bank_snapshots(bank_snapshots_dir); - bank_snapshots.retain(|bank_snapshot| bank_snapshot.snapshot_kind == BankSnapshotKind::Post); - bank_snapshots -} - -/// Get the bank snapshot with the highest slot in a directory -/// -/// This function gets the highest bank snapshot of kind BankSnapshotKind::Pre -pub fn get_highest_bank_snapshot_pre( - bank_snapshots_dir: impl AsRef, -) -> Option { - 
do_get_highest_bank_snapshot(get_bank_snapshots_pre(bank_snapshots_dir)) -} - -/// Get the bank snapshot with the highest slot in a directory -/// -/// This function gets the highest bank snapshot of kind BankSnapshotKind::Post -pub fn get_highest_bank_snapshot_post( - bank_snapshots_dir: impl AsRef, -) -> Option { - do_get_highest_bank_snapshot(get_bank_snapshots_post(bank_snapshots_dir)) -} - /// Get the bank snapshot with the highest slot in a directory /// /// This function gets the highest bank snapshot of any kind @@ -1448,7 +1420,8 @@ fn check_deserialize_file_consumed( if consumed_size != file_size { let error_message = format!( - "invalid snapshot data file: '{}' has {} bytes, however consumed {} bytes to deserialize", + "invalid snapshot data file: '{}' has {} bytes, however consumed {} bytes to \ + deserialize", file_path.as_ref().display(), file_size, consumed_size, @@ -1579,7 +1552,7 @@ pub fn verify_and_unarchive_snapshots( full_snapshot_archive_info: &FullSnapshotArchiveInfo, incremental_snapshot_archive_info: Option<&IncrementalSnapshotArchiveInfo>, account_paths: &[PathBuf], - storage_access: StorageAccess, + accounts_db_config: &AccountsDbConfig, ) -> Result<(UnarchivedSnapshots, UnarchivedSnapshotsGuard)> { check_are_snapshots_compatible( full_snapshot_archive_info, @@ -1602,7 +1575,7 @@ pub fn verify_and_unarchive_snapshots( account_paths, full_snapshot_archive_info.archive_format(), next_append_vec_id.clone(), - storage_access, + accounts_db_config, )?; let ( @@ -1628,7 +1601,7 @@ pub fn verify_and_unarchive_snapshots( account_paths, incremental_snapshot_archive_info.archive_format(), next_append_vec_id.clone(), - storage_access, + accounts_db_config, )?; ( Some(unpack_dir), @@ -1673,13 +1646,19 @@ fn streaming_unarchive_snapshot( ledger_dir: PathBuf, snapshot_archive_path: PathBuf, archive_format: ArchiveFormat, + memlock_budget_size: usize, ) -> JoinHandle> { Builder::new() .name("solTarUnpack".to_string()) .spawn(move || { - let decompressor 
= decompressed_tar_reader(archive_format, snapshot_archive_path)?; + let archive_size = fs::metadata(&snapshot_archive_path)?.len() as usize; + let read_write_budget_size = (memlock_budget_size / 2).min(archive_size); + let read_buf_size = MAX_SNAPSHOT_READER_BUF_SIZE.min(read_write_budget_size as u64); + let decompressor = + decompressed_tar_reader(archive_format, snapshot_archive_path, read_buf_size)?; hardened_unpack::streaming_unpack_snapshot( Archive::new(decompressor), + read_write_budget_size, ledger_dir.as_path(), &account_paths, &file_sender, @@ -1692,10 +1671,10 @@ fn streaming_unarchive_snapshot( fn decompressed_tar_reader( archive_format: ArchiveFormat, archive_path: impl AsRef, -) -> Result>> { - const INPUT_READER_BUF_SIZE: usize = 128 * 1024 * 1024; + buf_size: u64, +) -> Result> { let buf_reader = - solana_accounts_db::large_file_buf_reader(archive_path.as_ref(), INPUT_READER_BUF_SIZE) + solana_accounts_db::large_file_buf_reader(archive_path.as_ref(), buf_size as usize) .map_err(|err| { io::Error::other(format!( "failed to open snapshot archive '{}': {err}", @@ -1817,7 +1796,7 @@ fn snapshot_fields_from_files(file_receiver: &Receiver) -> Result) -> Result<()> { - let snapshots_dir = unpack_dir.as_ref().join("snapshots"); + let snapshots_dir = unpack_dir.as_ref().join(BANK_SNAPSHOTS_DIR); if !snapshots_dir.is_dir() { return Err(SnapshotError::NoSnapshotSlotDir(snapshots_dir)); } @@ -1839,7 +1818,7 @@ fn create_snapshot_meta_files_for_unarchived_snapshot(unpack_dir: impl AsRef, - storage_access: StorageAccess, + accounts_db_config: &AccountsDbConfig, ) -> Result { let unpack_dir = tempfile::Builder::new() .prefix(unpacked_snapshots_dir_prefix) .tempdir_in(bank_snapshots_dir)?; - let unpacked_snapshots_dir = unpack_dir.path().join("snapshots"); + let unpacked_snapshots_dir = unpack_dir.path().join(BANK_SNAPSHOTS_DIR); let (file_sender, file_receiver) = crossbeam_channel::unbounded(); let unarchive_handle = streaming_unarchive_snapshot( @@ -1869,6 
+1848,7 @@ fn unarchive_snapshot( unpack_dir.path().to_path_buf(), snapshot_archive_path.as_ref().to_path_buf(), archive_format, + accounts_db_config.memlock_budget_size, ); let num_rebuilder_threads = num_cpus::get_physical().saturating_sub(1).max(1); @@ -1888,11 +1868,11 @@ fn unarchive_snapshot( num_rebuilder_threads, next_append_vec_id, SnapshotFrom::Archive, - storage_access, + accounts_db_config.storage_access, )?, measure_name ); - info!("{}", measure_untar); + info!("{measure_untar}"); create_snapshot_meta_files_for_unarchived_snapshot(&unpack_dir)?; Ok(UnarchivedSnapshot { @@ -2035,7 +2015,7 @@ pub fn rebuild_storages_from_snapshot_dir( /// Reads the `snapshot_version` from a file. Before opening the file, its size /// is compared to `MAX_SNAPSHOT_VERSION_FILE_SIZE`. If the size exceeds this /// threshold, it is not opened and an error is returned. -fn snapshot_version_from_file(path: impl AsRef) -> Result { +fn snapshot_version_from_file(path: impl AsRef) -> io::Result { // Check file size. let file_metadata = fs::metadata(&path).map_err(|err| { IoError::other(format!( @@ -2051,7 +2031,7 @@ fn snapshot_version_from_file(path: impl AsRef) -> Result { file_size, MAX_SNAPSHOT_VERSION_FILE_SIZE, ); - return Err(IoError::other(error_message).into()); + return Err(IoError::other(error_message)); } // Read snapshot_version from file. 
@@ -2403,22 +2383,6 @@ pub fn purge_old_snapshot_archives( } } -#[cfg(feature = "dev-context-only-utils")] -fn unpack_snapshot_local( - snapshot_path: impl AsRef, - archive_format: ArchiveFormat, - ledger_dir: &Path, - account_paths: &[PathBuf], - num_threads: usize, -) -> Result { - assert!(num_threads > 0); - let archive = Archive::new(decompressed_tar_reader(archive_format, snapshot_path)?); - let unpacked_append_vec_map = - hardened_unpack::unpack_snapshot(archive, ledger_dir, account_paths)?; - - Ok(unpacked_append_vec_map) -} - pub fn verify_unpacked_snapshots_dir_and_version( unpacked_snapshots_dir_and_version: &UnpackedSnapshotsDirAndVersion, ) -> Result<(SnapshotVersion, BankSnapshotInfo)> { @@ -2429,7 +2393,7 @@ pub fn verify_unpacked_snapshots_dir_and_version( let snapshot_version = unpacked_snapshots_dir_and_version.snapshot_version; let mut bank_snapshots = - get_bank_snapshots_post(&unpacked_snapshots_dir_and_version.unpacked_snapshots_dir); + get_bank_snapshots(&unpacked_snapshots_dir_and_version.unpacked_snapshots_dir); if bank_snapshots.len() > 1 { return Err(IoError::other(format!( "invalid snapshot format: only one snapshot allowed, but found {}", @@ -2470,100 +2434,6 @@ pub enum VerifyBank { NonDeterministic, } -#[cfg(feature = "dev-context-only-utils")] -pub fn verify_snapshot_archive( - snapshot_archive: impl AsRef, - snapshots_to_verify: impl AsRef, - archive_format: ArchiveFormat, - verify_bank: VerifyBank, - slot: Slot, -) { - let temp_dir = tempfile::TempDir::new().unwrap(); - let unpack_dir = temp_dir.path(); - let unpack_account_dir = create_accounts_run_and_snapshot_dirs(unpack_dir).unwrap().0; - unpack_snapshot_local( - snapshot_archive, - archive_format, - unpack_dir, - &[unpack_account_dir.clone()], - 1, - ) - .unwrap(); - - // Check snapshots are the same - let unpacked_snapshots = unpack_dir.join("snapshots"); - - // Since the unpack code collects all the appendvecs into one directory unpack_account_dir, we need to - // collect all 
the appendvecs in account_paths//snapshot/ into one directory for later comparison. - let storages_to_verify = unpack_dir.join("storages_to_verify"); - // Create the directory if it doesn't exist - fs::create_dir_all(&storages_to_verify).unwrap(); - - let slot = slot.to_string(); - let snapshot_slot_dir = snapshots_to_verify.as_ref().join(&slot); - - if let VerifyBank::NonDeterministic = verify_bank { - // file contents may be different, but deserialized structs should be equal - let p1 = snapshots_to_verify.as_ref().join(&slot).join(&slot); - let p2 = unpacked_snapshots.join(&slot).join(&slot); - assert!(crate::serde_snapshot::compare_two_serialized_banks(&p1, &p2).unwrap()); - fs::remove_file(p1).unwrap(); - fs::remove_file(p2).unwrap(); - } - - // The new the status_cache file is inside the slot directory together with the snapshot file. - // When unpacking an archive, the status_cache file from the archive is one-level up outside of - // the slot directory. - // The unpacked status_cache file need to be put back into the slot directory for the directory - // comparison to pass. - let existing_unpacked_status_cache_file = - unpacked_snapshots.join(SNAPSHOT_STATUS_CACHE_FILENAME); - let new_unpacked_status_cache_file = unpacked_snapshots - .join(&slot) - .join(SNAPSHOT_STATUS_CACHE_FILENAME); - fs::rename( - existing_unpacked_status_cache_file, - new_unpacked_status_cache_file, - ) - .unwrap(); - - let accounts_hardlinks_dir = snapshot_slot_dir.join(SNAPSHOT_ACCOUNTS_HARDLINKS); - if accounts_hardlinks_dir.is_dir() { - // This directory contain symlinks to all /snapshot/ directories. - for entry in fs::read_dir(&accounts_hardlinks_dir).unwrap() { - let link_dst_path = fs::read_link(entry.unwrap().path()).unwrap(); - // Copy all the files in dst_path into the storages_to_verify directory. 
- for entry in fs::read_dir(&link_dst_path).unwrap() { - let src_path = entry.unwrap().path(); - let dst_path = storages_to_verify.join(src_path.file_name().unwrap()); - fs::copy(src_path, dst_path).unwrap(); - } - } - fs::remove_dir_all(accounts_hardlinks_dir).unwrap(); - } - - let version_path = snapshot_slot_dir.join(SNAPSHOT_VERSION_FILENAME); - if version_path.is_file() { - fs::remove_file(version_path).unwrap(); - } - - let state_complete_path = snapshot_slot_dir.join(SNAPSHOT_STATE_COMPLETE_FILENAME); - if state_complete_path.is_file() { - fs::remove_file(state_complete_path).unwrap(); - } - - assert!(!dir_diff::is_different(&snapshots_to_verify, unpacked_snapshots).unwrap()); - - // In the unarchiving case, there is an extra empty "accounts" directory. The account - // files in the archive accounts/ have been expanded to [account_paths]. - // Remove the empty "accounts" directory for the directory comparison below. - // In some test cases the directory to compare do not come from unarchiving. - // Ignore the error when this directory does not exist. 
- _ = fs::remove_dir(unpack_account_dir.join("accounts")); - // Check the account entries are the same - assert!(!dir_diff::is_different(&storages_to_verify, unpack_account_dir).unwrap()); -} - /// Purges all bank snapshots pub fn purge_all_bank_snapshots(bank_snapshots_dir: impl AsRef) { let bank_snapshots = get_bank_snapshots(&bank_snapshots_dir); @@ -2574,13 +2444,8 @@ pub fn purge_all_bank_snapshots(bank_snapshots_dir: impl AsRef) { pub fn purge_old_bank_snapshots( bank_snapshots_dir: impl AsRef, num_bank_snapshots_to_retain: usize, - filter_by_kind: Option, ) { - let mut bank_snapshots = match filter_by_kind { - Some(BankSnapshotKind::Pre) => get_bank_snapshots_pre(&bank_snapshots_dir), - Some(BankSnapshotKind::Post) => get_bank_snapshots_post(&bank_snapshots_dir), - None => get_bank_snapshots(&bank_snapshots_dir), - }; + let mut bank_snapshots = get_bank_snapshots(&bank_snapshots_dir); bank_snapshots.sort_unstable(); purge_bank_snapshots( @@ -2592,18 +2457,14 @@ pub fn purge_old_bank_snapshots( } /// At startup, purge old (i.e. unusable) bank snapshots -/// -/// Only a single bank snapshot could be needed at startup (when using fast boot), so -/// retain the highest bank snapshot "post", and purge the rest. 
pub fn purge_old_bank_snapshots_at_startup(bank_snapshots_dir: impl AsRef) { - purge_old_bank_snapshots(&bank_snapshots_dir, 0, Some(BankSnapshotKind::Pre)); - purge_old_bank_snapshots(&bank_snapshots_dir, 1, Some(BankSnapshotKind::Post)); + purge_old_bank_snapshots(&bank_snapshots_dir, 1); - let highest_bank_snapshot_post = get_highest_bank_snapshot_post(&bank_snapshots_dir); - if let Some(highest_bank_snapshot_post) = highest_bank_snapshot_post { + let highest_bank_snapshot = get_highest_bank_snapshot(&bank_snapshots_dir); + if let Some(highest_bank_snapshot) = highest_bank_snapshot { debug!( "Retained bank snapshot for slot {}, and purged the rest.", - highest_bank_snapshot_post.slot + highest_bank_snapshot.slot ); } } @@ -2845,7 +2706,7 @@ mod tests { file.write_all(&file_content).unwrap(); assert_matches!( snapshot_version_from_file(file.path()), - Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("snapshot version file too large") + Err(ref message) if message.to_string().starts_with("snapshot version file too large") ); } @@ -3049,9 +2910,6 @@ mod tests { let version_path = snapshot_dir.join(SNAPSHOT_VERSION_FILENAME); fs::write(version_path, SnapshotVersion::default().as_str().as_bytes()).unwrap(); - - // Mark this directory complete so it can be used. Check this flag first before selecting for deserialization. 
- write_snapshot_state_complete_file(snapshot_dir).unwrap(); } } @@ -3067,13 +2925,13 @@ mod tests { } #[test] - fn test_get_highest_bank_snapshot_post() { + fn test_get_highest_bank_snapshot() { let temp_snapshots_dir = tempfile::TempDir::new().unwrap(); let min_slot = 99; let max_slot = 123; common_create_bank_snapshot_files(temp_snapshots_dir.path(), min_slot, max_slot); - let highest_bank_snapshot = get_highest_bank_snapshot_post(temp_snapshots_dir.path()); + let highest_bank_snapshot = get_highest_bank_snapshot(temp_snapshots_dir.path()); assert!(highest_bank_snapshot.is_some()); assert_eq!(highest_bank_snapshot.unwrap().slot, max_slot - 1); } diff --git a/runtime/src/stake_history.rs b/runtime/src/stake_history.rs index 5ca9bad3d9bfeb..5a035b68e6ad97 100644 --- a/runtime/src/stake_history.rs +++ b/runtime/src/stake_history.rs @@ -1,9 +1,9 @@ //! This module implements clone-on-write semantics for the SDK's `StakeHistory` to reduce //! unnecessary cloning of the underlying vector. -pub use solana_sysvar::stake_history::StakeHistoryGetEntry; +pub use solana_stake_interface::stake_history::StakeHistoryGetEntry; use { solana_clock::Epoch, - solana_sysvar::stake_history::{self, StakeHistoryEntry}, + solana_stake_interface::stake_history::{self, StakeHistoryEntry}, std::{ ops::{Deref, DerefMut}, sync::Arc, @@ -39,7 +39,7 @@ impl StakeHistoryGetEntry for StakeHistory { #[cfg(test)] mod tests { - use {super::*, solana_sysvar::stake_history::StakeHistoryEntry}; + use {super::*, solana_stake_interface::stake_history::StakeHistoryEntry}; fn rand_stake_history_entry() -> StakeHistoryEntry { StakeHistoryEntry { diff --git a/runtime/src/stake_weighted_timestamp.rs b/runtime/src/stake_weighted_timestamp.rs index 796eb306545fa5..7353300ab314b9 100644 --- a/runtime/src/stake_weighted_timestamp.rs +++ b/runtime/src/stake_weighted_timestamp.rs @@ -102,7 +102,7 @@ where #[cfg(test)] pub mod tests { - use {super::*, solana_account::Account, solana_native_token::sol_to_lamports}; 
+ use {super::*, solana_account::Account, solana_native_token::LAMPORTS_PER_SOL}; #[test] fn test_calculate_stake_weighted_timestamp_uses_median() { @@ -120,30 +120,30 @@ pub mod tests { let stakes: HashMap = [ ( pubkey0, - (sol_to_lamports(1.0), Account::new(1, 0, &Pubkey::default())), + (LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default())), ), ( pubkey1, - (sol_to_lamports(1.0), Account::new(1, 0, &Pubkey::default())), + (LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default())), ), ( pubkey2, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey3, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey4, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), @@ -228,21 +228,21 @@ pub mod tests { ( pubkey0, ( - sol_to_lamports(1_000_000.0), // 1/3 stake + 1_000_000 * LAMPORTS_PER_SOL, // 1/3 stake Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey1, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey2, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), @@ -276,14 +276,14 @@ pub mod tests { ( pubkey0, ( - sol_to_lamports(1_000_001.0), // 1/3 stake + 1_000_001 * LAMPORTS_PER_SOL, // 1/3 stake Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey1, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), @@ -332,21 +332,21 @@ pub mod tests { ( pubkey0, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey1, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey2, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, 
&Pubkey::default()), ), ), @@ -473,21 +473,21 @@ pub mod tests { ( pubkey0, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey1, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey2, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), @@ -609,21 +609,21 @@ pub mod tests { ( pubkey0, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey1, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey2, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), @@ -747,21 +747,21 @@ pub mod tests { ( pubkey0, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey1, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), ( pubkey2, ( - sol_to_lamports(1_000_000.0), + 1_000_000 * LAMPORTS_PER_SOL, Account::new(1, 0, &Pubkey::default()), ), ), diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 8caf572ae7007c..3253dbe6aecd85 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -519,7 +519,7 @@ pub(crate) mod tests { solana_rent::Rent, solana_stake_interface as stake, solana_stake_program::stake_state, - solana_vote_interface::state::{VoteState, VoteStateVersions}, + solana_vote_interface::state::{VoteStateV3, VoteStateVersions}, solana_vote_program::vote_state, }; @@ -696,8 +696,8 @@ pub(crate) mod tests { } // Vote account uninitialized - let default_vote_state = VoteState::default(); - let versioned = VoteStateVersions::new_current(default_vote_state); + let default_vote_state = VoteStateV3::default(); + let versioned = 
VoteStateVersions::new_v3(default_vote_state); vote_state::to(&versioned, &mut vote_account).unwrap(); stakes_cache.check_and_store(&vote_pubkey, &vote_account, None); diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs index 8c384bd519ba17..8a7cdb133cae4b 100644 --- a/runtime/src/status_cache.rs +++ b/runtime/src/status_cache.rs @@ -1,4 +1,5 @@ use { + ahash::{HashMap, HashMapExt as _}, log::*, rand::{thread_rng, Rng}, serde::Serialize, @@ -6,7 +7,7 @@ use { solana_clock::{Slot, MAX_RECENT_BLOCKHASHES}, solana_hash::Hash, std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{hash_map::Entry, HashSet}, sync::{Arc, Mutex}, }, }; @@ -16,7 +17,7 @@ const CACHED_KEY_SIZE: usize = 20; // Store forks in a single chunk of memory to avoid another lookup. pub type ForkStatus = Vec<(Slot, T)>; -type KeySlice = [u8; CACHED_KEY_SIZE]; +pub(crate) type KeySlice = [u8; CACHED_KEY_SIZE]; type KeyMap = HashMap>; // Map of Hash and status pub type Status = Arc)>>>; @@ -100,7 +101,8 @@ impl StatusCache { } } else { panic!( - "Map for key must exist if key exists in self.slot_deltas, slot: {slot}" + "Map for key must exist if key exists in self.slot_deltas, slot: \ + {slot}" ) } } @@ -152,7 +154,7 @@ impl StatusCache { let keys: Vec<_> = self.cache.keys().copied().collect(); for blockhash in keys.iter() { - trace!("get_status_any_blockhash: trying {}", blockhash); + trace!("get_status_any_blockhash: trying {blockhash}"); let status = self.get_status(&key, blockhash, ancestors); if status.is_some() { return status; diff --git a/runtime/tests/stake.rs b/runtime/tests/stake.rs deleted file mode 100755 index 44179f86489730..00000000000000 --- a/runtime/tests/stake.rs +++ /dev/null @@ -1,669 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] - -use { - solana_account::{from_account, state_traits::StateMut}, - solana_client_traits::SyncClient, - solana_clock::Slot, - solana_epoch_schedule::{EpochSchedule, MINIMUM_SLOTS_PER_EPOCH}, - 
solana_keypair::Keypair, - solana_message::Message, - solana_pubkey::Pubkey, - solana_rent::Rent, - solana_runtime::{ - bank::Bank, - bank_client::BankClient, - bank_forks::BankForks, - genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, - }, - solana_signer::Signer, - solana_stake_interface::{ - self as stake, instruction as stake_instruction, - state::{Authorized, Lockup, StakeStateV2}, - }, - solana_stake_program::stake_state, - solana_sysvar::{self as sysvar, stake_history::StakeHistory}, - solana_vote_program::{ - vote_instruction, - vote_state::{TowerSync, VoteInit, VoteStateV3, VoteStateVersions, MAX_LOCKOUT_HISTORY}, - }, - std::sync::{Arc, RwLock}, -}; - -fn new_bank_from_parent_with_bank_forks( - bank_forks: &RwLock, - parent: Arc, - collector_id: &Pubkey, - slot: Slot, -) -> Arc { - let bank = Bank::new_from_parent(parent, collector_id, slot); - bank_forks - .write() - .unwrap() - .insert(bank) - .clone_without_scheduler() -} - -/// get bank at next epoch + `n` slots -fn next_epoch_and_n_slots( - bank: Arc, - bank_forks: &RwLock, - mut n: usize, -) -> Arc { - bank.squash(); - let slot = bank.get_slots_in_epoch(bank.epoch()) + bank.slot(); - let mut bank = new_bank_from_parent_with_bank_forks(bank_forks, bank, &Pubkey::default(), slot); - - while n > 0 { - bank.squash(); - let slot = bank.slot() + 1; - bank = new_bank_from_parent_with_bank_forks(bank_forks, bank, &Pubkey::default(), slot); - n -= 1; - } - - bank -} - -fn fill_epoch_with_votes( - mut bank: Arc, - bank_forks: &RwLock, - vote_keypair: &Keypair, - mint_keypair: &Keypair, - start_slot: Slot, -) -> Arc { - let mint_pubkey = mint_keypair.pubkey(); - let vote_pubkey = vote_keypair.pubkey(); - let old_epoch = bank.epoch(); - while bank.epoch() != old_epoch + 1 { - bank.squash(); - let slot = bank.slot() + 1; - bank = new_bank_from_parent_with_bank_forks(bank_forks, bank, &Pubkey::default(), slot); - - let bank_client = BankClient::new_shared(bank.clone()); - let parent = 
bank.parent().unwrap(); - let lowest_slot = u64::max( - (parent.slot() + 1).saturating_sub(MAX_LOCKOUT_HISTORY as u64), - start_slot, - ); - let slots: Vec<_> = (lowest_slot..(parent.slot() + 1)).collect(); - let root = (lowest_slot > start_slot).then(|| lowest_slot - 1); - let tower_sync = TowerSync::new_from_slots(slots, parent.hash(), root); - let message = Message::new( - &[vote_instruction::tower_sync( - &vote_pubkey, - &vote_pubkey, - tower_sync, - )], - Some(&mint_pubkey), - ); - assert!(bank_client - .send_and_confirm_message(&[mint_keypair, vote_keypair], message) - .is_ok()); - } - bank -} - -fn warmed_up(bank: &Bank, stake_pubkey: &Pubkey) -> bool { - let stake = stake_state::stake_from(&bank.get_account(stake_pubkey).unwrap()).unwrap(); - - stake.delegation.stake - == stake.stake( - bank.epoch(), - &from_account::( - &bank.get_account(&sysvar::stake_history::id()).unwrap(), - ) - .unwrap(), - bank.new_warmup_cooldown_rate_epoch(), - ) -} - -fn get_staked(bank: &Bank, stake_pubkey: &Pubkey) -> u64 { - stake_state::stake_from(&bank.get_account(stake_pubkey).unwrap()) - .unwrap() - .stake( - bank.epoch(), - &from_account::( - &bank.get_account(&sysvar::stake_history::id()).unwrap(), - ) - .unwrap(), - bank.new_warmup_cooldown_rate_epoch(), - ) -} - -#[test] -fn test_stake_create_and_split_single_signature() { - solana_logger::setup(); - - let GenesisConfigInfo { - genesis_config, - mint_keypair: staker_keypair, - .. 
- } = create_genesis_config_with_leader(100_000_000_000, &solana_pubkey::new_rand(), 1_000_000); - - let staker_pubkey = staker_keypair.pubkey(); - - let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank_client = BankClient::new_shared(bank.clone()); - - let stake_address = - Pubkey::create_with_seed(&staker_pubkey, "stake", &stake::program::id()).unwrap(); - - let authorized = Authorized::auto(&staker_pubkey); - - let lamports = { - let rent = &bank.rent_collector().rent; - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let minimum_delegation = solana_stake_program::get_minimum_delegation( - bank.feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - 2 * (rent_exempt_reserve + minimum_delegation) - }; - - // Create stake account with seed - let message = Message::new( - &stake_instruction::create_account_with_seed( - &staker_pubkey, // from - &stake_address, // to - &staker_pubkey, // base - "stake", // seed - &authorized, - &Lockup::default(), - lamports, - ), - Some(&staker_pubkey), - ); - - // only one signature required - bank_client - .send_and_confirm_message(&[&staker_keypair], message) - .expect("failed to create and delegate stake account"); - - // split the stake - let split_stake_address = - Pubkey::create_with_seed(&staker_pubkey, "split_stake", &stake::program::id()).unwrap(); - // Test split - let message = Message::new( - &stake_instruction::split_with_seed( - &stake_address, // original - &staker_pubkey, // authorized - lamports / 2, - &split_stake_address, // new address - &staker_pubkey, // base - "split_stake", // seed - ), - Some(&staker_keypair.pubkey()), - ); - - assert!(bank_client - .send_and_confirm_message(&[&staker_keypair], message) - .is_ok()); - - // w00t! 
-} - -#[test] -fn test_stake_create_and_split_to_existing_system_account() { - // Ensure stake-split allows the user to promote an existing system account into - // a stake account. - - solana_logger::setup(); - - let GenesisConfigInfo { - genesis_config, - mint_keypair: staker_keypair, - .. - } = create_genesis_config_with_leader(100_000_000_000, &solana_pubkey::new_rand(), 1_000_000); - - let staker_pubkey = staker_keypair.pubkey(); - - let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank_client = BankClient::new_shared(bank.clone()); - - let stake_address = - Pubkey::create_with_seed(&staker_pubkey, "stake", &stake::program::id()).unwrap(); - - let authorized = Authorized::auto(&staker_pubkey); - - let lamports = { - let rent = &bank.rent_collector().rent; - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let minimum_delegation = solana_stake_program::get_minimum_delegation( - bank.feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - 2 * (rent_exempt_reserve + minimum_delegation) - }; - - // Create stake account with seed - let message = Message::new( - &stake_instruction::create_account_with_seed( - &staker_pubkey, // from - &stake_address, // to - &staker_pubkey, // base - "stake", // seed - &authorized, - &Lockup::default(), - lamports, - ), - Some(&staker_pubkey), - ); - - bank_client - .send_and_confirm_message(&[&staker_keypair], message) - .expect("failed to create and delegate stake account"); - - let split_stake_address = - Pubkey::create_with_seed(&staker_pubkey, "split_stake", &stake::program::id()).unwrap(); - - // First, put a system account where we want the new stake account - let existing_lamports = 42; - bank_client - .transfer_and_confirm(existing_lamports, &staker_keypair, &split_stake_address) - .unwrap(); - assert_eq!( - bank_client.get_balance(&split_stake_address).unwrap(), - existing_lamports - ); - - // Verify the split 
succeeds with lamports in the destination account - let message = Message::new( - &stake_instruction::split_with_seed( - &stake_address, // original - &staker_pubkey, // authorized - lamports / 2, - &split_stake_address, // new address - &staker_pubkey, // base - "split_stake", // seed - ), - Some(&staker_keypair.pubkey()), - ); - bank_client - .send_and_confirm_message(&[&staker_keypair], message) - .expect("failed to split into account with lamports"); - assert_eq!( - bank_client.get_balance(&split_stake_address).unwrap(), - existing_lamports + lamports / 2 - ); -} - -#[test] -fn test_stake_account_lifetime() { - let stake_keypair = Keypair::new(); - let stake_pubkey = stake_keypair.pubkey(); - let vote_keypair = Keypair::new(); - let vote_pubkey = vote_keypair.pubkey(); - let identity_keypair = Keypair::new(); - let identity_pubkey = identity_keypair.pubkey(); - - let GenesisConfigInfo { - mut genesis_config, - mint_keypair, - .. - } = create_genesis_config_with_leader( - 100_000_000_000, - &solana_pubkey::new_rand(), - 2_000_000_000, - ); - genesis_config.epoch_schedule = EpochSchedule::new(MINIMUM_SLOTS_PER_EPOCH); - genesis_config.rent = Rent::default(); - let (mut bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let mint_pubkey = mint_keypair.pubkey(); - let bank_client = BankClient::new_shared(bank.clone()); - - let (vote_balance, stake_rent_exempt_reserve, stake_minimum_delegation) = { - let rent = &bank.rent_collector().rent; - ( - rent.minimum_balance(VoteStateV3::size_of()), - rent.minimum_balance(StakeStateV2::size_of()), - solana_stake_program::get_minimum_delegation( - bank.feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ), - ) - }; - - // Create Vote Account - let message = Message::new( - &vote_instruction::create_account_with_config( - &mint_pubkey, - &vote_pubkey, - &VoteInit { - node_pubkey: identity_pubkey, - authorized_voter: vote_pubkey, - authorized_withdrawer: 
vote_pubkey, - commission: 50, - }, - vote_balance, - vote_instruction::CreateVoteAccountConfig { - space: VoteStateVersions::vote_state_size_of(true) as u64, - ..vote_instruction::CreateVoteAccountConfig::default() - }, - ), - Some(&mint_pubkey), - ); - bank_client - .send_and_confirm_message(&[&mint_keypair, &vote_keypair, &identity_keypair], message) - .expect("failed to create vote account"); - - let authorized = Authorized::auto(&stake_pubkey); - let bonus_delegation = 1_000_000_000; - let stake_starting_delegation = - 2 * stake_minimum_delegation + bonus_delegation + stake_rent_exempt_reserve; - let stake_starting_balance = stake_starting_delegation + stake_rent_exempt_reserve; - - // Create stake account and delegate to vote account - let message = Message::new( - &stake_instruction::create_account_and_delegate_stake( - &mint_pubkey, - &stake_pubkey, - &vote_pubkey, - &authorized, - &Lockup::default(), - stake_starting_balance, - ), - Some(&mint_pubkey), - ); - bank_client - .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) - .expect("failed to create and delegate stake account"); - - // Test that correct lamports are staked - let account = bank.get_account(&stake_pubkey).expect("account not found"); - let stake_state = account.state().expect("couldn't unpack account data"); - if let StakeStateV2::Stake(_meta, stake, _stake_flags) = stake_state { - assert_eq!(stake.delegation.stake, stake_starting_delegation,); - } else { - panic!("wrong account type found") - } - - // Test that we cannot withdraw anything until deactivation - let message = Message::new( - &[stake_instruction::withdraw( - &stake_pubkey, - &stake_pubkey, - &solana_pubkey::new_rand(), - 1, - None, - )], - Some(&mint_pubkey), - ); - assert!(bank_client - .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) - .is_err()); - - // Test that lamports are still staked - let account = bank.get_account(&stake_pubkey).expect("account not found"); - let stake_state = 
account.state().expect("couldn't unpack account data"); - if let StakeStateV2::Stake(_meta, stake, _stake_flags) = stake_state { - assert_eq!(stake.delegation.stake, stake_starting_delegation,); - } else { - panic!("wrong account type found") - } - - loop { - if warmed_up(&bank, &stake_pubkey) { - break; - } - // Cycle thru banks until we're fully warmed up - bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 0); - } - - // Reward redemption - // Submit enough votes to generate rewards - let start_slot = bank.slot(); - bank = fill_epoch_with_votes( - bank, - bank_forks.as_ref(), - &vote_keypair, - &mint_keypair, - start_slot, - ); - - // Test that votes and credits are there - let account = bank.get_account(&vote_pubkey).expect("account not found"); - let vote_state: VoteStateV3 = StateMut::::state(&account) - .expect("couldn't unpack account data") - .convert_to_current(); - - // 1 less vote, as the first vote should have cleared the lockout - assert_eq!(vote_state.votes.len(), 31); - // one vote per slot, might be more slots than 32 in the epoch - assert!(vote_state.credits() >= 1); - - bank = fill_epoch_with_votes( - bank, - bank_forks.as_ref(), - &vote_keypair, - &mint_keypair, - start_slot, - ); - - let pre_staked = get_staked(&bank, &stake_pubkey); - let pre_balance = bank.get_balance(&stake_pubkey); - - // next epoch bank plus one additional slot should pay rewards - bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 1); - - // Test that balance increased, and that the balance got staked - let staked = get_staked(&bank, &stake_pubkey); - let balance = bank.get_balance(&stake_pubkey); - assert!(staked > pre_staked); - assert!(balance > pre_balance); - - // split the stake - let split_stake_keypair = Keypair::new(); - let split_stake_pubkey = split_stake_keypair.pubkey(); - - bank.transfer( - stake_rent_exempt_reserve, - &mint_keypair, - &split_stake_pubkey, - ) - .unwrap(); - let bank_client = BankClient::new_shared(bank.clone()); - - // Test 
split - let split_starting_delegation = stake_minimum_delegation + bonus_delegation; - let message = Message::new( - &stake_instruction::split( - &stake_pubkey, - &stake_pubkey, - split_starting_delegation, - &split_stake_pubkey, - ), - Some(&mint_pubkey), - ); - assert!(bank_client - .send_and_confirm_message( - &[&mint_keypair, &stake_keypair, &split_stake_keypair], - message - ) - .is_ok()); - assert_eq!( - get_staked(&bank, &split_stake_pubkey), - split_starting_delegation, - ); - let stake_remaining_balance = balance - split_starting_delegation; - - // Deactivate the split - let message = Message::new( - &[stake_instruction::deactivate_stake( - &split_stake_pubkey, - &stake_pubkey, - )], - Some(&mint_pubkey), - ); - assert!(bank_client - .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) - .is_ok()); - assert_eq!( - get_staked(&bank, &split_stake_pubkey), - split_starting_delegation, - ); - - // Test that we cannot withdraw above what's staked - let message = Message::new( - &[stake_instruction::withdraw( - &split_stake_pubkey, - &stake_pubkey, - &solana_pubkey::new_rand(), - split_starting_delegation + 1, - None, - )], - Some(&mint_pubkey), - ); - assert!(bank_client - .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) - .is_err()); - - let mut bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 1); - - let bank_client = BankClient::new_shared(bank.clone()); - - // assert we're still cooling down - let split_staked = get_staked(&bank, &split_stake_pubkey); - assert!(split_staked > 0); - - // withdrawal in cooldown - let split_balance = bank.get_balance(&split_stake_pubkey); - let message = Message::new( - &[stake_instruction::withdraw( - &split_stake_pubkey, - &stake_pubkey, - &solana_pubkey::new_rand(), - split_balance, - None, - )], - Some(&mint_pubkey), - ); - assert!(bank_client - .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) - .is_err()); - - // but we can withdraw unstaked - let 
split_unstaked = split_balance - split_staked - stake_rent_exempt_reserve; - assert!(split_unstaked > 0); - let message = Message::new( - &[stake_instruction::withdraw( - &split_stake_pubkey, - &stake_pubkey, - &solana_pubkey::new_rand(), - split_unstaked, - None, - )], - Some(&mint_pubkey), - ); - assert!(bank_client - .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) - .is_ok()); - - // finish cooldown - loop { - if get_staked(&bank, &split_stake_pubkey) == 0 { - break; - } - bank = next_epoch_and_n_slots(bank, bank_forks.as_ref(), 1); - } - let bank_client = BankClient::new_shared(bank.clone()); - - // Test that we can withdraw everything else out of the split - let split_remaining_balance = split_balance - split_unstaked; - let message = Message::new( - &[stake_instruction::withdraw( - &split_stake_pubkey, - &stake_pubkey, - &solana_pubkey::new_rand(), - split_remaining_balance, - None, - )], - Some(&mint_pubkey), - ); - assert!(bank_client - .send_and_confirm_message(&[&mint_keypair, &stake_keypair], message) - .is_ok()); - - // verify all the math sums to zero - assert_eq!(bank.get_balance(&split_stake_pubkey), 0); - assert_eq!(bank.get_balance(&stake_pubkey), stake_remaining_balance); -} - -#[test] -fn test_create_stake_account_from_seed() { - let vote_keypair = Keypair::new(); - let vote_pubkey = vote_keypair.pubkey(); - let identity_keypair = Keypair::new(); - let identity_pubkey = identity_keypair.pubkey(); - - let GenesisConfigInfo { - genesis_config, - mint_keypair, - .. 
- } = create_genesis_config_with_leader(100_000_000_000, &solana_pubkey::new_rand(), 1_000_000); - let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let mint_pubkey = mint_keypair.pubkey(); - let bank_client = BankClient::new_shared(bank.clone()); - - let seed = "test-string"; - let stake_pubkey = Pubkey::create_with_seed(&mint_pubkey, seed, &stake::program::id()).unwrap(); - - // Create Vote Account - let message = Message::new( - &vote_instruction::create_account_with_config( - &mint_pubkey, - &vote_pubkey, - &VoteInit { - node_pubkey: identity_pubkey, - authorized_voter: vote_pubkey, - authorized_withdrawer: vote_pubkey, - commission: 50, - }, - 10, - vote_instruction::CreateVoteAccountConfig { - space: VoteStateVersions::vote_state_size_of(true) as u64, - ..vote_instruction::CreateVoteAccountConfig::default() - }, - ), - Some(&mint_pubkey), - ); - bank_client - .send_and_confirm_message(&[&mint_keypair, &vote_keypair, &identity_keypair], message) - .expect("failed to create vote account"); - - let authorized = Authorized::auto(&mint_pubkey); - let (balance, delegation) = { - let rent = &bank.rent_collector().rent; - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let minimum_delegation = solana_stake_program::get_minimum_delegation( - bank.feature_set - .is_active(&agave_feature_set::stake_raise_minimum_delegation_to_1_sol::id()), - ); - (rent_exempt_reserve + minimum_delegation, minimum_delegation) - }; - - // Create stake account and delegate to vote account - let message = Message::new( - &stake_instruction::create_account_with_seed_and_delegate_stake( - &mint_pubkey, - &stake_pubkey, - &mint_pubkey, - seed, - &vote_pubkey, - &authorized, - &Lockup::default(), - balance, - ), - Some(&mint_pubkey), - ); - bank_client - .send_and_confirm_message(&[&mint_keypair], message) - .expect("failed to create and delegate stake account"); - - // Test that correct lamports are staked - let account = 
bank.get_account(&stake_pubkey).expect("account not found"); - let stake_state = account.state().expect("couldn't unpack account data"); - if let StakeStateV2::Stake(_meta, stake, _) = stake_state { - assert_eq!(stake.delegation.stake, delegation); - } else { - panic!("wrong account type found") - } -} diff --git a/rustfmt.toml b/rustfmt.toml index e26d07f0d84eb8..8651681b5c70de 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,2 +1,3 @@ imports_granularity = "One" +format_strings = true group_imports = "One" diff --git a/scheduler-bindings/Cargo.toml b/scheduler-bindings/Cargo.toml new file mode 100644 index 00000000000000..4f13cc119d2f1e --- /dev/null +++ b/scheduler-bindings/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "agave-scheduler-bindings" +description = "Agave scheduler-binding message types for external pack process integration" +documentation = "https://docs.rs/agave-scheduler-bindings" +readme = "../README.md" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] + +[lints] +workspace = true diff --git a/scheduler-bindings/src/lib.rs b/scheduler-bindings/src/lib.rs new file mode 100644 index 00000000000000..d3c56ce8b8f396 --- /dev/null +++ b/scheduler-bindings/src/lib.rs @@ -0,0 +1,289 @@ +#![no_std] + +//! Messages passed between agave and an external pack process. +//! Messages are passed via `shaq::Consumer/Producer`. +//! +//! Memory freeing is responsibility of the external pack process, +//! and is done via `rts-alloc` crate. It is also possible the external +//! pack process allocates memory to pass to agave, BUT it will still be +//! the responsibility of the external pack process to free that memory. +//! +//! Setting up the shared memory allocator and queues is done outside of +//! agave - it can be done by the external pack process or another +//! process. 
agave will just `join` shared memory regions, but not +//! create them. +//! Similarly, agave will not delete files used for shared memory regions. +//! See `shaq` and `rts-alloc` crates for details. +//! +//! The basic architecture is as follows: +//! ┌───────────────┐ ┌─────────────────┐ +//! │ tpu_to_pack │ │ progress_tracker│ +//! └───────┬───────┘ └───────┬─────────┘ +//! │ │ +//! │ │ +//! │ │ +//! ┌──▼───────────────────────▼───┐ +//! │ external scheduler │ +//! └─▲─────── ▲────────────────▲──┘ +//! │ │ │ +//! │ │ │ +//! ┌───▼───┐ ┌──▼─────┐ ... ┌───▼───┐ +//! │worker1│ │worker2 │ │workerN│ +//! └───────┘ └────────┘ └───────┘ +//! +//! - [`TpuToPackMessage`] are sent from `tpu_to_pack` queue to the +//! external scheduler process. This passes in tpu transactions to be scheduled, +//! and optionally vote transactions. +//! - [`ProgressMessage`] are sent from `progress_tracker` queue to the +//! external scheduler process. This passes information about leader status +//! and slot progress to the external scheduler process. +//! - [`PackToWorkerMessage`] are sent from the external scheduler process +//! to worker threads within agave. This passes a batch of transactions +//! to be processed by the worker threads. This processing can also involve +//! resolving the transactions' addresses, or similar operations beyond +//! execution. +//! - [`WorkerToPackMessage`] are sent from worker threads within agave +//! back to the external scheduler process. This passes back the results +//! of processing the transactions. +//! + +/// Reference to a transaction that can shared safely across processes. +#[repr(C)] +pub struct SharableTransactionRegion { + /// Offset within the shared memory allocator. + pub offset: usize, + /// Length of the transaction in bytes. + pub length: u32, +} + +/// Reference to an array of Pubkeys that can be shared safely across processes. +#[repr(C)] +pub struct SharablePubkeys { + /// Offset within the shared memory allocator. 
+ pub offset: usize, + /// Number of pubkeys in the array. + /// IF 0, indicates no pubkeys and no allocation needing to be freed. + pub num_pubkeys: u32, +} + +/// Reference to an array of [`SharableTransactionRegion`] that can be shared safely +/// across processes. +/// General flow: +/// 1. External pack process allocates memory for +/// `num_transactions` [`SharableTransactionRegion`]. +/// 2. External pack sends a [`PackToWorkerMessage`] with `batch`. +/// 3. agave processes the transactions and sends back a [`WorkerToPackMessage`] +/// with the same `batch`. +/// 4. External pack process frees all transaction memory pointed to by the +/// [`SharableTransactionRegion`] in the batch, then frees the memory for +/// the array of [`SharableTransactionRegion`]. +#[repr(C)] +pub struct SharableTransactionBatchRegion { + /// Number of transactions in the batch. + pub num_transactions: u8, + /// Offset within the shared memory allocator for the batch of transactions. + /// The transactions are laid out back-to-back in memory as a + /// [`SharableTransactionRegion`] with size `num_transactions`. + pub transactions_offset: u32, +} +/// Reference to an array of response messages. +/// General flow: +/// 1. agave allocates memory for `num_transaction_responses` inner messages. +/// 2. agave sends a [`WorkerToPackMessage`] with `responses`. +/// 3. External pack process processes the inner messages. Potentially freeing +/// any memory within each inner message (see [`worker_message_types`] for details). +#[repr(C)] +pub struct TransactionResponseRegion { + /// Tag indicating the type of message. + /// See [`worker_message_types`] for details. + /// All inner messages/responses per trasaction will be of the same type. + pub tag: u8, + /// The number of transactions in the original message. + /// This corresponds to the number of inner response + /// messages that will be pointed to by `response_offset`. + /// This MUST be the same as `batch.num_transactions`. 
+ pub num_transaction_responses: u8, + /// Offset within the shared memory allocator for the array of + /// inner messages. + /// The inner messages are laid out back-to-back in memory starting at + /// this offset. The type of each inner message is indicated by `tag`. + /// There are `num_transaction_responses` inner messages. + /// See [`worker_message_types`] for details on the inner message types. + pub transaction_responses_offset: u32, +} + +/// Message: [TPU -> Pack] +/// TPU passes transactions to the external pack process. +/// This is also a transfer of ownership of the transaction: +/// the external pack process is responsible for freeing the memory. +pub struct TpuToPackMessage { + pub transaction: SharableTransactionRegion, + /// See [`tpu_message_flags`] for details. + pub flags: u8, + /// The source address of the transaction. + /// IPv6-mapped IPv4 addresses: `::ffff:a.b.c.d` + /// where a.b.c.d is the IPv4 address. + /// See . + pub src_addr: [u8; 16], +} + +pub mod tpu_message_flags { + /// No special flags. + pub const NONE: u8 = 0; + + /// The transaction is a simple vote transaction. + pub const IS_SIMPLE_VOTE: u8 = 1 << 0; + /// The transaction was forwarded by a validator node. + pub const FORWARDED: u8 = 1 << 1; + /// The transaction was sent from a staked node. + pub const FROM_STAKED_NODE: u8 = 1 << 2; +} + +/// Message: [Agave -> Pack] +/// Agave passes leader status to the external pack process. +#[repr(C)] +pub struct ProgressMessage { + /// The current slot. + pub current_slot: u64, + /// Next known leader slot or u64::MAX if unknown. + /// If currently leader, this is equal to `current_slot`. + pub next_leader_slot: u64, + /// The remaining cost units allowed to be packed in the block. + /// i.e. block_limit - current_cost_units_used. + /// Only valid if currently leader, otherwise the value is undefined. + pub remaining_cost_units: u64, + /// Progress through the current slot in percentage. 
+ pub current_slot_progress: u8, +} + +/// Message: [Pack -> Worker] +/// External pack processe passes transactions to worker threads within agave. +/// +/// These messages do not transfer ownership of the transactions. +/// The external pack process is still responsible for freeing the memory. +#[repr(C)] +pub struct PackToWorkerMessage { + /// Flags on how to handle this message. + /// See [`pack_message_flags`] for details. + pub flags: u16, + /// If [`pack_message_flags::RESOLVE`] flag is not set, this is the + /// maximum slot the transactions can be processed in. If the working + /// bank's slot in the worker thread is greater than this slot, + /// the transaction will not be processed. + pub max_execution_slot: u64, + /// Offset and number of transactions in the batch. + /// See [`SharableTransactionBatchRegion`] for details. + /// Agave will return this batch in the response message, it is + /// the responsibility of the external pack process to free the memory + /// ONLY after receiving the response message. + pub batch: SharableTransactionBatchRegion, +} + +pub mod pack_message_flags { + //! Flags for [`crate::PackToWorkerMessage::flags`]. + //! These flags can be ORed together so must be unique bits, with + //! the exception of [`NONE`]. + //! The *default* behavior, [`NONE`], is to attempt execution and + //! inclusion in the specified `max_execution_slot`. + + /// No special handling - execute the transactions normally. + pub const NONE: u16 = 0; + + /// Transactions on the [`super::PackToWorkerMessage`] should have their + /// addresses resolved. + /// + /// If this flag, the transaction will attempt to be executed and included + /// in the current block. + pub const RESOLVE: u16 = 1 << 1; +} + +/// Message: [Worker -> Pack] +/// Message from worker threads in response to a [`PackToWorkerMessage`]. +/// [`PackToWorkerMessage`] may have multiple response messages that +/// will follow the order of transactions in the original message. 
+#[repr(C)] +pub struct WorkerToPackMessage { + /// Offset and number of transactions in the batch. + /// See [`SharableTransactionBatchRegion`] for details. + /// Once the external pack process receives this message, + /// it is responsible for freeing the memory for this batch, + /// and is safe to do so - agave will hold no references to this memory + /// after sending this message. + pub batch: SharableTransactionBatchRegion, + /// `true` if the message was processed. + /// `false` if the message could not be processed. This will occur + /// if the passed message was invalid, and could indicate an issue + /// with the external pack process. + /// If `false`, the value of [`Self::responses`] is undefined. + pub processed: bool, + /// Response per transaction in the batch. + /// See [`TransactionResponseRegion`] for details. + pub responses: TransactionResponseRegion, +} + +pub mod worker_message_types { + use crate::SharablePubkeys; + + /// Tag indicating [`ExecutionResponse`] inner message. + pub const EXECUTION_RESPONSE: u8 = 0; + + /// Response to pack for a transaction that attempted execution. + /// This response will only be sent if the original message flags + /// requested execution i.e. not [`super::pack_message_flags::RESOLVE`]. + #[repr(C)] + pub struct ExecutionResponse { + /// Indicates if the transaction was included in the block or not. + /// If [`not_included_reasons::NONE`], the transaction was included. + not_included_reason: u8, + /// If included, cost units used by the transaction. + pub cost_units: u64, + /// If included, the fee-payer balance after execution. + pub fee_payer_balance: u64, + } + + pub mod not_included_reasons { + /// The transaction was included in the block. + pub const NONE: u8 = 0; + /// The transaction could not attempt processing because the + /// working bank was unavailable.
+ pub const BANK_NOT_AVAILABLE: u8 = 1; + /// The transaction could not be processed because the `slot` + /// in the passed message did not match the working bank's slot. + pub const SLOT_MISMATCH: u8 = 2; + + // The following reasons are mapped from `TransactionError` in + // `solana-sdk` crate. See that crate for details. + pub const PARSING_OR_SANITIZATION_FAILURE: u8 = 3; + pub const ALT_RESOLUTION_FAILURE: u8 = 4; + pub const BLOCKHASH_NOT_FOUND: u8 = 5; + pub const ALREADY_PROCESSED: u8 = 6; + pub const WOULD_EXCEED_VOTE_MAX_LIMIT: u8 = 7; + pub const WOULD_EXCEED_BLOCK_MAX_LIMIT: u8 = 8; + pub const WOULD_EXCEED_ACCOUNT_MAX_LIMIT: u8 = 9; + pub const WOULD_EXCEED_ACCOUNT_DATA_BLOCK_LIMIT: u8 = 10; + pub const TOO_MANY_ACCOUNT_LOCKS: u8 = 11; + pub const ACCOUNT_LOADED_TWICE: u8 = 12; + pub const ACCOUNT_IN_USE: u8 = 13; + pub const INVALID_ACCOUNT_FOR_FEE: u8 = 14; + pub const INSUFFICIENT_FUNDS_FOR_FEE: u8 = 15; + pub const INSUFFICIENT_FUNDS_FOR_RENT: u8 = 16; + } + + /// Tag indicating [`Resolved`] inner message. + pub const RESOLVED: u8 = 1; + + #[repr(C)] + pub struct Resolved { + /// Indicates if resolution was successful. + pub success: bool, + /// Slot of the bank used for resolution. + pub slot: u64, + /// Minimum deactivation slot of any ALT if any. + /// u64::MAX if no ALTs or deactivation. + pub min_alt_deactivation_slot: u64, + /// Resolved pubkeys - writable then readonly. + /// Freeing this memory is the responsibility of the external + /// pack process. + pub resolved_pubkeys: SharablePubkeys, + } +} diff --git a/scripts/agave-build-lists.sh b/scripts/agave-build-lists.sh new file mode 100755 index 00000000000000..9fb9f6cf830233 --- /dev/null +++ b/scripts/agave-build-lists.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# Defines reusable lists of Agave binary names for use across scripts.
+ +# Source this file to access the arrays +# Example: +# source "scripts/agave-build-lists.sh" +# printf '%s\n' "${AGAVE_BINS_DEV[@]}" + + +# Groups with binary names to be built, based on their intended audience +# Keep names in sync with build/install scripts that consume these lists. + +# shellcheck disable=SC2034 +AGAVE_BINS_DEV=( + cargo-build-sbf + cargo-test-sbf + solana-test-validator +) + +AGAVE_BINS_END_USER=( + agave-install + solana + solana-keygen +) + +AGAVE_BINS_VAL_OP=( + agave-validator + agave-watchtower + solana-gossip + solana-genesis + solana-faucet +) + +AGAVE_BINS_DCOU=( + agave-ledger-tool +) + +# These bins are deprecated and will be removed in a future release +AGAVE_BINS_DEPRECATED=( + solana-stake-accounts + solana-tokens + agave-install-init +) + +DCOU_TAINTED_PACKAGES=( + agave-ledger-tool + agave-store-histogram + agave-store-tool + solana-accounts-cluster-bench + solana-banking-bench + solana-bench-tps + solana-dos + solana-local-cluster + solana-transaction-dos + solana-vortexor +) diff --git a/scripts/cargo-clippy-nightly.sh b/scripts/cargo-clippy-nightly.sh index 756699408f4340..7a63529cb9298b 100755 --- a/scripts/cargo-clippy-nightly.sh +++ b/scripts/cargo-clippy-nightly.sh @@ -29,4 +29,5 @@ source "$here/../ci/rust-version.sh" nightly --deny=clippy::default_trait_access \ --deny=clippy::arithmetic_side_effects \ --deny=clippy::manual_let_else \ + --deny=clippy::uninlined-format-args \ --deny=clippy::used_underscore_binding diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index 96671d01deb70f..64f45421d27c43 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -86,55 +86,22 @@ cd "$(dirname "$0")"/.. SECONDS=0 -if [[ $CI_OS_NAME = windows ]]; then - # Limit windows to end-user command-line tools. 
Full validator support is not - # yet available on windows - BINS=( - cargo-build-sbf - cargo-test-sbf - solana - agave-install - agave-install-init - solana-keygen - solana-stake-accounts - solana-test-validator - solana-tokens - ) - DCOU_BINS=() +source "$SOLANA_ROOT"/scripts/agave-build-lists.sh + +BINS=() +DCOU_BINS=() +if [[ -n "$validatorOnly" ]]; then + echo "Building binaries for net.sh deploys: ${AGAVE_BINS_END_USER[*]} ${AGAVE_BINS_VAL_OP[*]} ${AGAVE_BINS_DCOU[*]}" + BINS+=("${AGAVE_BINS_END_USER[@]}" "${AGAVE_BINS_VAL_OP[@]}") + DCOU_BINS+=("${AGAVE_BINS_DCOU[@]}") else - ./fetch-perf-libs.sh + echo "Building binaries for all platforms: ${AGAVE_BINS_DEV[*]} ${AGAVE_BINS_END_USER[*]} ${AGAVE_BINS_DEPRECATED[*]}" + BINS+=("${AGAVE_BINS_DEV[@]}" "${AGAVE_BINS_END_USER[@]}" "${AGAVE_BINS_DEPRECATED[@]}") - BINS=( - solana - solana-faucet - solana-genesis - solana-gossip - agave-install - solana-keygen - solana-log-analyzer - solana-net-shaper - agave-validator - rbpf-cli - ) - DCOU_BINS=( - agave-ledger-tool - solana-bench-tps - ) - - # Speed up net.sh deploys by excluding unused binaries - if [[ -z "$validatorOnly" ]]; then - BINS+=( - cargo-build-sbf - cargo-test-sbf - agave-install-init - solana-stake-accounts - solana-test-validator - solana-tokens - agave-watchtower - ) - DCOU_BINS+=( - solana-dos - ) + if [[ $CI_OS_NAME != windows ]]; then + echo "Building binaries for linux and osx only: ${AGAVE_BINS_VAL_OP[*]}, ${AGAVE_BINS_DCOU[*]}" + BINS+=("${AGAVE_BINS_VAL_OP[@]}") + DCOU_BINS+=("${AGAVE_BINS_DCOU[@]}") fi fi @@ -148,15 +115,11 @@ for bin in "${DCOU_BINS[@]}"; do dcouBinArgs+=(--bin "$bin") done -source "$SOLANA_ROOT"/scripts/dcou-tainted-packages.sh - excludeArgs=() -for package in "${dcou_tainted_packages[@]}"; do +for package in "${DCOU_TAINTED_PACKAGES[@]}"; do excludeArgs+=(--exclude "$package") done -mkdir -p "$installDir/bin" - cargo_build() { # shellcheck disable=SC2086 # Don't want to double quote $maybeRustVersion "$cargo" 
$maybeRustVersion build $buildProfileArg "$@" @@ -214,8 +177,12 @@ for bin in "${BINS[@]}" "${DCOU_BINS[@]}"; do cp -fv "target/$buildProfile/$bin" "$installDir"/bin done -if [[ -d target/perf-libs ]]; then - cp -a target/perf-libs "$installDir"/bin/perf-libs +if [[ $CI_OS_NAME != windows ]]; then + ./fetch-perf-libs.sh + + if [[ -d target/perf-libs ]]; then + cp -a target/perf-libs "$installDir"/bin/perf-libs + fi fi if [[ -z "$validatorOnly" ]]; then @@ -232,7 +199,7 @@ fi # deps dir can be empty shopt -s nullglob for dep in target/"$buildProfile"/deps/libsolana*program.*; do - cp -fv "$dep" "$installDir/bin/deps" + cp -fv "$dep" "$installDir"/bin/deps done ) diff --git a/scripts/check-dev-context-only-utils.sh b/scripts/check-dev-context-only-utils.sh index 17ae497d4e9ccb..c34a19568bc491 100755 --- a/scripts/check-dev-context-only-utils.sh +++ b/scripts/check-dev-context-only-utils.sh @@ -28,10 +28,10 @@ source ci/rust-version.sh nightly # as normal (not dev) dependencies, only if you're sure that there's good # reason to bend dev-context-only-utils's original intention and that listed # package isn't part of released binaries. -source scripts/dcou-tainted-packages.sh +source scripts/agave-build-lists.sh # convert to comma separeted (ref: https://stackoverflow.com/a/53839433) -printf -v allowed '"%s",' "${dcou_tainted_packages[@]}" +printf -v allowed '"%s",' "${DCOU_TAINTED_PACKAGES[@]}" allowed="${allowed%,}" mode=${1:-full} diff --git a/scripts/coverage-in-disk.sh b/scripts/coverage-in-disk.sh index 3279f7735017da..da545b22e2b324 100755 --- a/scripts/coverage-in-disk.sh +++ b/scripts/coverage-in-disk.sh @@ -108,7 +108,6 @@ find target/cov -type f -name '*.gcda' -newer target/cov/before-test ! 
-newer ta --ignore \*.cargo\* --ignore \*build.rs --ignore bench-tps\* - --ignore upload-perf\* --ignore bench-streamer\* --ignore local-cluster\* ) diff --git a/scripts/coverage.sh b/scripts/coverage.sh index 6a144316ba50c1..ee2992b2d8edd2 100755 --- a/scripts/coverage.sh +++ b/scripts/coverage.sh @@ -46,6 +46,7 @@ rm -rf "$here/../target/cov/$COMMIT_HASH" # https://doc.rust-lang.org/rustc/instrument-coverage.html export RUSTFLAGS="-C instrument-coverage $RUSTFLAGS" +export RUSTFLAGS="--cfg curve25519_dalek_backend=\"serial\" $RUSTFLAGS" export LLVM_PROFILE_FILE="$here/../target/cov/${COMMIT_HASH}/profraw/default-%p-%m.profraw" if [[ -z $1 ]]; then @@ -78,7 +79,6 @@ grcov_common_args=( --ignore \*.cargo\* --ignore \*build.rs --ignore bench-tps\* - --ignore upload-perf\* --ignore bench-streamer\* --ignore local-cluster\* ) diff --git a/scripts/dcou-tainted-packages.sh b/scripts/dcou-tainted-packages.sh deleted file mode 100644 index 8590fbdd9f3cfc..00000000000000 --- a/scripts/dcou-tainted-packages.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -# shellcheck disable=SC2034 # This file is intended to be `source`d -declare dcou_tainted_packages=( - solana-banking-bench - agave-ledger-tool - solana-bench-tps - agave-store-tool - agave-store-histogram - solana-dos -) diff --git a/scripts/iftop-postprocess.sh b/scripts/iftop-postprocess.sh deleted file mode 100755 index 5454ba517cb196..00000000000000 --- a/scripts/iftop-postprocess.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -# -# Reports network bandwidth usage -# -set -e - -usage() { - echo "Usage: $0 [optional list of IP address mapping]" - echo - echo Processes iftop log file, and extracts latest bandwidth used by each connection - echo - echo -} - -if [ "$#" -lt 2 ]; then - usage - exit 1 -fi - -cd "$(dirname "$0")" - -awk '{ if ($3 ~ "=>") { print $2, $7 } else if ($2 ~ "<=") { print $1, $6 }} ' < "$1" \ - | awk 'NR%2{printf "%s ",$0;next;}1' \ - | awk '{ print "{ \"a\": \""$1"\", " "\"b\": 
\""$3"\", \"a_to_b\": \""$2"\", \"b_to_a\": \""$4"\"}," }' > "$2" - -if [ "$#" -lt 3 ]; then - solana-log-analyzer iftop -f "$2" -else - list=$(cat "$3") - solana-log-analyzer iftop -f "$2" map-IP --list "$list" -fi - -exit 1 diff --git a/scripts/netem.sh b/scripts/netem.sh deleted file mode 100755 index ba286f8127c4f3..00000000000000 --- a/scripts/netem.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -# -# Start/Stop network emulation -# -set -e - -[[ $(uname) == Linux ]] || exit 0 - -cd "$(dirname "$0")" - -sudo= -if sudo true; then - sudo="sudo -n" -fi - -set -x - -iface="$(ifconfig | grep mtu | grep -iv loopback | grep -i running | awk 'BEGIN { FS = ":" } ; {print $1}')" - -if [[ "$1" = delete ]]; then - $sudo iptables -F -t mangle -else - $sudo iptables -A POSTROUTING -t mangle -p udp -j MARK --set-mark 1 -fi - -$sudo tc qdisc "$1" dev "$iface" root handle 1: prio -# shellcheck disable=SC2086 # Do not want to quote $2. It has space separated arguments for netem -$sudo tc qdisc "$1" dev "$iface" parent 1:3 handle 30: netem $2 -$sudo tc filter "$1" dev "$iface" parent 1:0 protocol ip prio 3 handle 1 fw flowid 1:3 diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index d12b39c3b32f49..044261eaf03ef7 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -1,14 +1,8 @@ -#[deprecated( - since = "2.2.0", - note = "Please import from `send_transaction_service` directly." 
-)] -pub use crate::{ - send_transaction_service_stats::SendTransactionServiceStats, - transaction_client::{CurrentLeaderInfo, LEADER_INFO_REFRESH_RATE_MS}, -}; use { crate::{ - send_transaction_service_stats::SendTransactionServiceStatsReport, + send_transaction_service_stats::{ + SendTransactionServiceStats, SendTransactionServiceStatsReport, + }, transaction_client::TransactionClient, }, crossbeam_channel::{Receiver, RecvTimeoutError}, @@ -17,7 +11,10 @@ use { solana_hash::Hash, solana_nonce_account as nonce_account, solana_pubkey::Pubkey, - solana_runtime::{bank::Bank, bank_forks::BankForks}, + solana_runtime::{ + bank::Bank, + bank_forks::{BankForks, BankPair}, + }, solana_signature::Signature, std::{ collections::hash_map::{Entry, HashMap}, @@ -125,7 +122,7 @@ struct ProcessTransactionsResult { last_sent_time: Option, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct Config { pub retry_rate_ms: u64, pub leader_forward_count: u64, @@ -317,7 +314,7 @@ impl SendTransactionService { exit: Arc, ) -> JoinHandle<()> { debug!("Starting send-transaction-service::retry_thread."); - let root_bank = bank_forks.read().unwrap().sharable_root_bank(); + let sharable_banks = bank_forks.read().unwrap().sharable_banks(); let retry_interval_ms_default = MAX_RETRY_SLEEP_MS.min(config.retry_rate_ms); let mut retry_interval_ms = retry_interval_ms_default; Builder::new() @@ -335,11 +332,11 @@ impl SendTransactionService { stats .retry_queue_size .store(transactions.len() as u64, Ordering::Relaxed); - let (root_bank, working_bank) = { - let bank_forks = bank_forks.read().unwrap(); - (root_bank.load(), bank_forks.working_bank()) - }; + let BankPair { + root_bank, + working_bank, + } = sharable_banks.load(); let result = Self::process_transactions( &working_bank, &root_bank, diff --git a/stake-accounts/Cargo.toml b/stake-accounts/Cargo.toml index 799db6536de53e..53492d3d691054 100644 --- a/stake-accounts/Cargo.toml +++ b/stake-accounts/Cargo.toml @@ -1,9 +1,9 @@ 
[package] name = "solana-stake-accounts" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-stake-accounts" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -17,6 +17,7 @@ clap = { workspace = true } solana-account = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } +solana-cli-output = { workspace = true } solana-clock = { workspace = true } solana-commitment-config = { workspace = true } solana-fee-calculator = { workspace = true } @@ -33,9 +34,11 @@ solana-signature = { workspace = true } solana-signer = { workspace = true } solana-stake-interface = { workspace = true } solana-stake-program = { workspace = true } +solana-sysvar = { workspace = true } solana-transaction = { workspace = true } solana-version = { workspace = true } [dev-dependencies] solana-client-traits = { workspace = true } +solana-program-binaries = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/stake-accounts/src/arg_parser.rs b/stake-accounts/src/arg_parser.rs index 173006c0e60ec2..afb24127fd6749 100644 --- a/stake-accounts/src/arg_parser.rs +++ b/stake-accounts/src/arg_parser.rs @@ -11,7 +11,7 @@ use { input_validators::{is_amount, is_rfc3339_datetime, is_valid_pubkey, is_valid_signer}, }, solana_cli_config::CONFIG_FILE, - solana_native_token::sol_to_lamports, + solana_native_token::sol_str_to_lamports, std::{ffi::OsString, process::exit}, }; @@ -164,8 +164,8 @@ where .possible_values(&["processed", "confirmed", "finalized"]) .hide_possible_values(true) .help( - "Return information at the selected commitment level \ - [possible values: processed, confirmed, finalized]", + "Return information at the selected commitment level [possible values: \ + processed, confirmed, finalized]", ), ) .arg( @@ 
-298,7 +298,10 @@ fn parse_new_args(matches: &ArgMatches<'_>) -> NewArgs { NewArgs { fee_payer: value_t_or_exit!(matches, "fee_payer", String), funding_keypair: value_t_or_exit!(matches, "funding_keypair", String), - lamports: sol_to_lamports(value_t_or_exit!(matches, "amount", f64)), + lamports: matches + .value_of("amount") + .and_then(sol_str_to_lamports) + .unwrap(), base_keypair: value_t_or_exit!(matches, "base_keypair", String), stake_authority: value_t_or_exit!(matches, "stake_authority", String), withdraw_authority: value_t_or_exit!(matches, "withdraw_authority", String), diff --git a/stake-accounts/src/main.rs b/stake-accounts/src/main.rs index f3c3461fa7e9f1..cee727d93cc89f 100644 --- a/stake-accounts/src/main.rs +++ b/stake-accounts/src/main.rs @@ -11,9 +11,9 @@ use { }, }, solana_cli_config::Config, + solana_cli_output::display::build_balance_message, solana_commitment_config::CommitmentConfig, solana_message::Message, - solana_native_token::lamports_to_sol, solana_pubkey::Pubkey, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::client_error::Error as ClientError, @@ -270,7 +270,7 @@ fn main() -> Result<(), Box> { ); let balances = get_balances(&client, addresses)?; let lamports: u64 = balances.into_iter().map(|(_, bal)| bal).sum(); - let sol = lamports_to_sol(lamports); + let sol = build_balance_message(lamports, false, false); println!("{sol} SOL"); } Command::Authorize(args) => { diff --git a/stake-accounts/src/stake_accounts.rs b/stake-accounts/src/stake_accounts.rs index d8e48c1ae1c394..9fb66ff5431c86 100644 --- a/stake-accounts/src/stake_accounts.rs +++ b/stake-accounts/src/stake_accounts.rs @@ -290,16 +290,36 @@ mod tests { solana_signer::Signer, solana_stake_interface::state::StakeStateV2, solana_stake_program::stake_state, + solana_sysvar::epoch_rewards::EpochRewards, std::sync::{Arc, RwLock}, }; fn create_bank(lamports: u64) -> (Arc, Arc>, Keypair, u64, u64) { let (mut genesis_config, mint_keypair) = 
create_genesis_config(lamports); genesis_config.fee_rate_governor = solana_fee_calculator::FeeRateGovernor::new(0, 0); + + for (pubkey, account) in + solana_program_binaries::by_id(&stake::program::id(), &genesis_config.rent) + .unwrap() + .into_iter() + { + genesis_config.add_account(pubkey, account); + } + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + bank.squash(); + let bank = Bank::new_from_parent(bank, &Pubkey::new_unique(), 1); + bank.set_sysvar_for_tests(&EpochRewards::default()); + let stake_rent = bank.get_minimum_balance_for_rent_exemption(StakeStateV2::size_of()); let system_rent = bank.get_minimum_balance_for_rent_exemption(0); - (bank, bank_forks, mint_keypair, stake_rent, system_rent) + ( + bank.into(), + bank_forks, + mint_keypair, + stake_rent, + system_rent, + ) } fn create_account( diff --git a/storage-bigtable/build-proto/Cargo.toml b/storage-bigtable/build-proto/Cargo.toml index 0a96465209757c..18f2bc1ab365e7 100644 --- a/storage-bigtable/build-proto/Cargo.toml +++ b/storage-bigtable/build-proto/Cargo.toml @@ -1,9 +1,9 @@ [package] -description = "Blockchain, Rebuilt for Scale" name = "proto" publish = false version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index 7d5fd96d589ffc..c641e860efde14 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -793,7 +793,7 @@ impl TryFrom for TransactionError { 41 => InstructionError::ProgramFailedToCompile, 42 => InstructionError::Immutable, 43 => InstructionError::IncorrectAuthority, - 44 => InstructionError::BorshIoError(String::new()), + 44 => InstructionError::BorshIoError, 45 => InstructionError::AccountNotRentExempt, 46 => InstructionError::InvalidAccountOwner, 47 => InstructionError::ArithmeticOverflow, @@ -1131,7 +1131,7 @@ 
impl From for tx_by_addr::TransactionError { InstructionError::IncorrectAuthority => { tx_by_addr::InstructionErrorType::IncorrectAuthority } - InstructionError::BorshIoError(_) => { + InstructionError::BorshIoError => { tx_by_addr::InstructionErrorType::BorshIoError } InstructionError::AccountNotRentExempt => { diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 2d338605faf21a..417534a1b500e8 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -21,7 +21,6 @@ dev-context-only-utils = [] [dependencies] arc-swap = { workspace = true } -async-channel = { workspace = true } bytes = { workspace = true } crossbeam-channel = { workspace = true } dashmap = { workspace = true } @@ -46,7 +45,7 @@ socket2 = { workspace = true } solana-keypair = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } -solana-net-utils = { workspace = true } +solana-net-utils = { workspace = true, features = ["agave-unstable-api"] } solana-packet = { workspace = true } solana-perf = { workspace = true } solana-pubkey = { workspace = true } @@ -63,7 +62,9 @@ tokio-util = { workspace = true } x509-parser = { workspace = true } [dev-dependencies] +anyhow = { workspace = true } assert_matches = { workspace = true } +clap = { version = "4.5.31", features = ["cargo", "derive", "error-context"] } solana-logger = { workspace = true } solana-net-utils = { workspace = true, features = ["dev-context-only-utils"] } solana-streamer = { path = ".", features = ["dev-context-only-utils"] } diff --git a/streamer/examples/swqos.rs b/streamer/examples/swqos.rs new file mode 100644 index 00000000000000..05c4afeac9b964 --- /dev/null +++ b/streamer/examples/swqos.rs @@ -0,0 +1,165 @@ +#![allow(clippy::arithmetic_side_effects)] +//! Standalone QUIC streamer server. +//! +//! This utility isolates the QUIC server component, making it convenient for +//! testing and performance tuning. It logs all received "transactions" to a +//! binary file. +//! 
The logged info includes the bytes 0..32, wherein you can store metadata such +//! as sender's pubkey. + +use { + clap::Parser, + crossbeam_channel::bounded, + log::{debug, info}, + solana_keypair::Keypair, + solana_net_utils::sockets::{bind_to_with_config, SocketConfiguration}, + solana_pubkey::Pubkey, + solana_streamer::{ + nonblocking::quic::SpawnNonBlockingServerResult, quic::QuicServerParams, + streamer::StakedNodes, + }, + std::{ + collections::HashMap, + io::{BufRead as _, BufReader, Write}, + net::SocketAddr, + path::Path, + str::FromStr as _, + sync::{Arc, RwLock}, + time::Duration, + }, + tokio::time::{sleep, Instant}, + tokio_util::sync::CancellationToken, +}; + +fn parse_duration(arg: &str) -> Result { + let seconds = arg.parse()?; + Ok(std::time::Duration::from_secs_f64(seconds)) +} +const LAMPORTS_PER_SOL: u64 = 1000000000; + +pub fn load_staked_nodes_overrides(path: &String) -> anyhow::Result> { + debug!("Loading staked nodes overrides configuration from {path}"); + if Path::new(&path).exists() { + let file = std::fs::File::open(path)?; + let reader = BufReader::new(file); + + let mut map = HashMap::new(); + for (line_num, line) in reader.lines().enumerate() { + let line = line?; + let parts: Vec<&str> = line.split_whitespace().collect(); + if parts.len() != 2 { + anyhow::bail!("invalid line {line_num}: {line}"); + } + let pubkey = Pubkey::from_str(parts[0]) + .map_err(|_| anyhow::anyhow!("invalid pubkey at line {line_num}"))?; + let value: u64 = parts[1] + .parse() + .map_err(|_| anyhow::anyhow!("invalid number at line {line_num}"))?; + + map.insert(pubkey, value.saturating_mul(LAMPORTS_PER_SOL)); + } + Ok(map) + } else { + anyhow::bail!("Staked nodes overrides provided '{path}' a non-existing file path.") + } +} + +#[derive(Debug, Parser)] +struct Cli { + #[arg(short, long, default_value_t = 1)] + max_connections_per_peer: usize, + + #[arg(short, long, default_value = "0.0.0.0:8008")] + bind_to: SocketAddr, + + #[arg(short, long, default_value = 
"./results/serverlog.bin")] + log_file: String, + + #[arg(short, long, value_parser = parse_duration)] + test_duration: Duration, + + #[arg(short, long)] + stake_amounts: String, +} + +// number of threads as in fn default_num_tpu_transaction_forward_receive_threads +#[tokio::main(flavor = "multi_thread", worker_threads = 16)] +async fn main() -> anyhow::Result<()> { + solana_logger::setup(); + let cli = Cli::parse(); + let socket = bind_to_with_config( + cli.bind_to.ip(), + cli.bind_to.port(), + SocketConfiguration::default(), + ) + .expect("should bind"); + + let (sender, receiver) = bounded(1024); + let keypair = Keypair::new(); + + let staked_nodes = { + let nodes = StakedNodes::new( + Arc::new(HashMap::new()), + load_staked_nodes_overrides(&cli.stake_amounts)?, + ); + Arc::new(RwLock::new(nodes)) + }; + + let cancel = CancellationToken::new(); + let SpawnNonBlockingServerResult { + endpoints, + stats, + thread: run_thread, + max_concurrent_connections: _, + } = solana_streamer::nonblocking::quic::spawn_server_with_cancel( + "quic_streamer_test", + [socket.try_clone()?], + &keypair, + sender, + staked_nodes, + QuicServerParams { + max_connections_per_peer: cli.max_connections_per_peer, + ..QuicServerParams::default() + }, + cancel.clone(), + )?; + info!("Server listening on {}", socket.local_addr()?); + + let path = cli.log_file.clone(); + let logger_thread = tokio::task::spawn_blocking(move || -> anyhow::Result<()> { + let start = Instant::now(); + let logfile = std::fs::File::create(&path)?; + info!("Logfile in {}", &path); + let mut logfile = std::io::BufWriter::new(logfile); + let mut sum = 0; + for batch in receiver { + let delta_time = start.elapsed().as_micros() as u32; + for pkt in batch.iter() { + let pkt = pkt.to_bytes_packet(); + if pkt.buffer().len() < 32 { + continue; + } + let pubkey: [u8; 32] = pkt.buffer()[0..32].try_into()?; + logfile.write_all(&pubkey)?; + let pkt_len = pkt.buffer().len(); + logfile.write_all(&pkt_len.to_ne_bytes())?; + 
logfile.write_all(&delta_time.to_ne_bytes())?; + let pubkey = Pubkey::new_from_array(pubkey); + debug!("{pubkey}: {pkt_len} bytes"); + sum += 1; + } + } + info!("Server captured {sum} TXs"); + logfile.flush()?; + Ok(()) + }); + + sleep(cli.test_duration).await; + info!("Server terminating"); + cancel.cancel(); + drop(endpoints); + run_thread.await?; + logger_thread.await??; + stats.report("final_stats"); + Ok(()) +} diff --git a/streamer/src/atomic_udp_socket.rs b/streamer/src/atomic_udp_socket.rs deleted file mode 100644 index 1617d7a3d8a9f2..00000000000000 --- a/streamer/src/atomic_udp_socket.rs +++ /dev/null @@ -1,125 +0,0 @@ -use { - arc_swap::ArcSwap, - std::{ - net::{SocketAddr, UdpSocket}, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - }, -}; - -/// Wrapper around UdpSocket that allows for atomic swapping of the socket. -#[derive(Debug)] -pub struct AtomicUdpSocketInner { - socket: ArcSwap, - did_change: AtomicBool, -} - -#[derive(Debug, Clone)] -pub struct AtomicUdpSocket { - inner: Arc, -} - -impl AtomicUdpSocket { - pub fn new(sock: UdpSocket) -> Self { - Self { - inner: Arc::new(AtomicUdpSocketInner { - socket: ArcSwap::from_pointee(sock), - did_change: AtomicBool::new(false), - }), - } - } - - #[inline] - pub fn load(&self) -> Arc { - self.inner.socket.load_full() - } - - /// Returns true if the socket has changed since the last call to this method. - /// - /// Will swap the `did_change` flag to `false`. - #[inline] - pub fn did_change(&self) -> bool { - self.inner.did_change.swap(false, Ordering::Acquire) - } - - #[inline] - pub fn swap(&self, new_sock: UdpSocket) { - self.inner.socket.store(Arc::new(new_sock)); - self.inner.did_change.store(true, Ordering::Release); - } - - pub fn local_addr(&self) -> std::io::Result { - self.inner.socket.load().local_addr() - } -} - -pub enum CurrentSocket { - Same(Arc), - Changed(Arc), -} - -/// Trait for providing a socket. 
-pub trait SocketProvider { - fn current_socket(&self) -> CurrentSocket; - - fn did_change(&self) -> bool; - - #[inline] - fn current_socket_ref(&self) -> Arc { - match self.current_socket() { - CurrentSocket::Same(sock) | CurrentSocket::Changed(sock) => sock, - } - } -} - -/// Fixed UDP Socket -> default -pub struct FixedSocketProvider { - socket: Arc, -} -impl FixedSocketProvider { - pub fn new(socket: Arc) -> Self { - Self { socket } - } -} -impl SocketProvider for FixedSocketProvider { - #[inline] - fn did_change(&self) -> bool { - false - } - - #[inline] - fn current_socket(&self) -> CurrentSocket { - CurrentSocket::Same(self.socket.clone()) - } -} - -/// Hot-swappable `AtomicUdpSocket` -pub struct AtomicSocketProvider { - atomic: Arc, -} - -impl AtomicSocketProvider { - pub fn new(atomic: Arc) -> Self { - Self { atomic } - } -} - -impl SocketProvider for AtomicSocketProvider { - #[inline] - fn did_change(&self) -> bool { - self.atomic.did_change() - } - - // Check if the socket has changed since the last call - #[inline] - fn current_socket(&self) -> CurrentSocket { - let sock = self.atomic.load(); - if self.did_change() { - CurrentSocket::Changed(sock) - } else { - CurrentSocket::Same(sock) - } - } -} diff --git a/streamer/src/lib.rs b/streamer/src/lib.rs index 25fa9ae721d1b4..60ee2b753f77e3 100644 --- a/streamer/src/lib.rs +++ b/streamer/src/lib.rs @@ -1,5 +1,4 @@ #![allow(clippy::arithmetic_side_effects)] -pub mod atomic_udp_socket; pub mod evicting_sender; pub mod msghdr; pub mod nonblocking; diff --git a/streamer/src/nonblocking/connection_rate_limiter.rs b/streamer/src/nonblocking/connection_rate_limiter.rs index 8680feba923fda..205b76c8620ef9 100644 --- a/streamer/src/nonblocking/connection_rate_limiter.rs +++ b/streamer/src/nonblocking/connection_rate_limiter.rs @@ -3,6 +3,7 @@ use { std::{net::IpAddr, num::NonZeroU32}, }; +/// Limits the rate of connections per IP address. 
pub struct ConnectionRateLimiter { limiter: DefaultKeyedRateLimiter, } @@ -75,7 +76,17 @@ impl TotalConnectionRateLimiter { #[cfg(test)] pub mod test { - use {super::*, std::net::Ipv4Addr}; + use { + super::*, + std::{ + net::Ipv4Addr, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::{Duration, Instant}, + }, + }; #[tokio::test] async fn test_total_connection_rate_limiter() { @@ -104,4 +115,42 @@ pub mod test { assert!(limiter.is_allowed(&ip2)); assert!(!limiter.is_allowed(&ip2)); } + + #[test] + fn test_bench_rate_limiter() { + let run_duration = Duration::from_secs(3); + let limiter = Arc::new(ConnectionRateLimiter::new(60 * 100)); + + let accepted = AtomicUsize::new(0); + let rejected = AtomicUsize::new(0); + + let start = Instant::now(); + let ip_pool = 2048; + let expected_total_accepts = (run_duration.as_secs() * 100 * ip_pool) as i64; + let workers = 8; + + std::thread::scope(|scope| { + for _ in 0..workers { + scope.spawn(|| { + for i in 1.. { + if Instant::now() > start + run_duration { + break; + } + let ip = IpAddr::V4(Ipv4Addr::from_bits(i % ip_pool as u32)); + if limiter.is_allowed(&ip) { + accepted.fetch_add(1, Ordering::Relaxed); + } else { + rejected.fetch_add(1, Ordering::Relaxed); + } + } + }); + } + }); + + let acc = accepted.load(Ordering::Relaxed); + let rej = rejected.load(Ordering::Relaxed); + println!("Run complete over {:?} seconds", run_duration.as_secs()); + println!("Accepted: {acc} (target {expected_total_accepts})"); + println!("Rejected: {rej}"); + } } diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 9cb55d8f288609..d6f9c11e81746b 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -46,7 +46,6 @@ use { Arc, RwLock, }, task::Poll, - thread, time::{Duration, Instant}, }, tokio::{ @@ -60,7 +59,7 @@ use { // introduce any other awaits while holding the RwLock. 
select, sync::{Mutex, MutexGuard}, - task::JoinHandle, + task::{self, JoinHandle}, time::{sleep, timeout}, }, tokio_util::sync::CancellationToken, @@ -140,18 +139,20 @@ pub struct SpawnNonBlockingServerResult { pub max_concurrent_connections: usize, } -pub fn spawn_server( +#[deprecated(since = "3.0.0", note = "Use spawn_server instead")] +pub fn spawn_server_multi( name: &'static str, - sock: UdpSocket, + sockets: impl IntoIterator, keypair: &Keypair, packet_sender: Sender, exit: Arc, staked_nodes: Arc>, quic_server_params: QuicServerParams, ) -> Result { - spawn_server_multi( + #[allow(deprecated)] + spawn_server( name, - vec![sock], + sockets, keypair, packet_sender, exit, @@ -160,29 +161,53 @@ pub fn spawn_server( ) } -pub fn spawn_server_multi( +#[deprecated(since = "3.1.0", note = "Use spawn_server_with_cancel instead")] +pub fn spawn_server( name: &'static str, - sockets: Vec, + sockets: impl IntoIterator, keypair: &Keypair, packet_sender: Sender, exit: Arc, staked_nodes: Arc>, quic_server_params: QuicServerParams, ) -> Result { + let cancel = CancellationToken::new(); + tokio::spawn({ + let cancel = cancel.clone(); + async move { + loop { + if exit.load(Ordering::Relaxed) { + cancel.cancel(); + break; + } + sleep(Duration::from_millis(100)).await; + } + } + }); + + spawn_server_with_cancel( + name, + sockets, + keypair, + packet_sender, + staked_nodes, + quic_server_params, + cancel, + ) +} + +/// Spawn a streamer instance in the current tokio runtime. 
+pub fn spawn_server_with_cancel( + name: &'static str, + sockets: impl IntoIterator, + keypair: &Keypair, + packet_sender: Sender, + staked_nodes: Arc>, + quic_server_params: QuicServerParams, + cancel: CancellationToken, +) -> Result { + let sockets: Vec<_> = sockets.into_iter().collect(); info!("Start {name} quic server on {sockets:?}"); - let QuicServerParams { - max_unstaked_connections, - max_staked_connections, - max_connections_per_peer, - max_streams_per_ms, - max_connections_per_ipaddr_per_min, - wait_for_chunk_timeout, - coalesce, - coalesce_channel_size, - num_threads: _, - } = quic_server_params; - let concurrent_connections = max_staked_connections + max_unstaked_connections; - let max_concurrent_connections = concurrent_connections + concurrent_connections / 4; let (config, _) = configure_server(keypair)?; let endpoints = sockets @@ -198,23 +223,33 @@ pub fn spawn_server_multi( }) .collect::, _>>()?; let stats = Arc::::default(); + let (packet_batch_sender, packet_batch_receiver) = + bounded(quic_server_params.coalesce_channel_size); + task::spawn_blocking({ + let cancel = cancel.clone(); + let stats = stats.clone(); + move || { + run_packet_batch_sender( + packet_sender, + packet_batch_receiver, + stats, + quic_server_params.coalesce, + cancel, + ); + } + }); + + let max_concurrent_connections = quic_server_params.max_concurrent_connections(); let handle = tokio::spawn(run_server( name, endpoints.clone(), - packet_sender, - exit, - max_connections_per_peer, + packet_batch_sender, staked_nodes, - max_staked_connections, - max_unstaked_connections, - max_streams_per_ms, - max_connections_per_ipaddr_per_min, stats.clone(), - wait_for_chunk_timeout, - coalesce, - coalesce_channel_size, - max_concurrent_connections, + quic_server_params, + cancel, )); + Ok(SpawnNonBlockingServerResult { endpoints, stats, @@ -272,22 +307,14 @@ impl ClientConnectionTracker { async fn run_server( name: &'static str, endpoints: Vec, - packet_sender: Sender, - exit: Arc, - 
max_connections_per_peer: usize, + packet_batch_sender: Sender, staked_nodes: Arc>, - max_staked_connections: usize, - max_unstaked_connections: usize, - max_streams_per_ms: u64, - max_connections_per_ipaddr_per_min: u64, stats: Arc, - wait_for_chunk_timeout: Duration, - coalesce: Duration, - coalesce_channel_size: usize, - max_concurrent_connections: usize, + quic_server_params: QuicServerParams, + cancel: CancellationToken, ) { let rate_limiter = Arc::new(ConnectionRateLimiter::new( - max_connections_per_ipaddr_per_min, + quic_server_params.max_connections_per_ipaddr_per_min, )); let overall_connection_rate_limiter = Arc::new(TotalConnectionRateLimiter::new( TOTAL_CONNECTIONS_PER_SECOND, @@ -297,26 +324,17 @@ async fn run_server( debug!("spawn quic server"); let mut last_datapoint = Instant::now(); let unstaked_connection_table: Arc> = - Arc::new(Mutex::new(ConnectionTable::new())); + Arc::new(Mutex::new(ConnectionTable::new(false, cancel.clone()))); let stream_load_ema = Arc::new(StakedStreamLoadEMA::new( stats.clone(), - max_unstaked_connections, - max_streams_per_ms, + quic_server_params.max_unstaked_connections, + quic_server_params.max_streams_per_ms, )); stats .quic_endpoints_count .store(endpoints.len(), Ordering::Relaxed); let staked_connection_table: Arc> = - Arc::new(Mutex::new(ConnectionTable::new())); - let (sender, receiver) = bounded(coalesce_channel_size); - - thread::spawn({ - let exit = exit.clone(); - let stats = stats.clone(); - move || { - packet_batch_sender(packet_sender, receiver, exit, stats, coalesce); - } - }); + Arc::new(Mutex::new(ConnectionTable::new(true, cancel.clone()))); let mut accepts = endpoints .iter() @@ -329,7 +347,7 @@ async fn run_server( }) .collect::>(); - while !exit.load(Ordering::Relaxed) { + loop { let timeout_connection = select! 
{ ready = accepts.next() => { if let Some((connecting, i)) = ready { @@ -348,6 +366,7 @@ async fn run_server( _ = tokio::time::sleep(WAIT_FOR_CONNECTION_TIMEOUT) => { Err(()) } + _ = cancel.cancelled() => break, }; if last_datapoint.elapsed().as_secs() >= 5 { @@ -368,9 +387,10 @@ async fn run_server( .connection_rate_limiter_length .store(rate_limiter.len(), Ordering::Relaxed); - let Ok(client_connection_tracker) = - ClientConnectionTracker::new(stats.clone(), max_concurrent_connections) - else { + let Ok(client_connection_tracker) = ClientConnectionTracker::new( + stats.clone(), + quic_server_params.max_concurrent_connections(), + ) else { stats .refused_connections_too_many_open_connections .fetch_add(1, Ordering::Relaxed); @@ -393,15 +413,11 @@ async fn run_server( client_connection_tracker, unstaked_connection_table.clone(), staked_connection_table.clone(), - sender.clone(), - max_connections_per_peer, + packet_batch_sender.clone(), staked_nodes.clone(), - max_staked_connections, - max_unstaked_connections, - max_streams_per_ms, stats.clone(), - wait_for_chunk_timeout, stream_load_ema.clone(), + quic_server_params.clone(), )); } Err(err) => { @@ -459,7 +475,7 @@ fn get_connection_stake( )) } -pub fn compute_max_allowed_uni_streams(peer_type: ConnectionPeerType, total_stake: u64) -> usize { +fn compute_max_allowed_uni_streams(peer_type: ConnectionPeerType, total_stake: u64) -> usize { match peer_type { ConnectionPeerType::Staked(peer_stake) => { // No checked math for f64 type. 
So let's explicitly check for 0 here @@ -493,11 +509,6 @@ enum ConnectionHandlerError { #[derive(Clone)] struct NewConnectionHandlerParams { - // In principle, the code can be made to work with a crossbeam channel - // as long as we're careful never to use a blocking recv or send call - // but I've found that it's simply too easy to accidentally block - // in async code when using the crossbeam channel, so for the sake of maintainability, - // we're sticking with an async channel packet_sender: Sender, remote_pubkey: Option, peer_type: ConnectionPeerType, @@ -506,6 +517,8 @@ struct NewConnectionHandlerParams { stats: Arc, max_stake: u64, min_stake: u64, + max_connections: usize, + wait_for_chunk_timeout: Duration, } impl NewConnectionHandlerParams { @@ -513,6 +526,8 @@ impl NewConnectionHandlerParams { packet_sender: Sender, max_connections_per_peer: usize, stats: Arc, + wait_for_chunk_timeout: Duration, + max_connections: usize, ) -> NewConnectionHandlerParams { NewConnectionHandlerParams { packet_sender, @@ -523,17 +538,30 @@ impl NewConnectionHandlerParams { stats, max_stake: 0, min_stake: 0, + max_connections, + wait_for_chunk_timeout, } } } +fn update_open_connections_stat(stats: &StreamerStats, connection_table: &ConnectionTable) { + if connection_table.is_staked() { + stats + .open_staked_connections + .store(connection_table.table_size(), Ordering::Relaxed); + } else { + stats + .open_unstaked_connections + .store(connection_table.table_size(), Ordering::Relaxed); + } +} + fn handle_and_cache_new_connection( client_connection_tracker: ClientConnectionTracker, connection: Connection, mut connection_table_l: MutexGuard, connection_table: Arc>, params: &NewConnectionHandlerParams, - wait_for_chunk_timeout: Duration, stream_load_ema: Arc, ) -> Result<(), ConnectionHandlerError> { if let Ok(max_uni_streams) = VarInt::from_u64(compute_max_allowed_uni_streams( @@ -565,6 +593,7 @@ fn handle_and_cache_new_connection( params.max_connections_per_peer, ) { + 
update_open_connections_stat(¶ms.stats, &connection_table_l); drop(connection_table_l); if let Ok(receive_window) = receive_window { @@ -577,11 +606,10 @@ fn handle_and_cache_new_connection( remote_addr, last_update, connection_table, - cancel_connection, params.clone(), - wait_for_chunk_timeout, stream_load_ema, stream_counter, + cancel_connection, )); Ok(()) } else { @@ -608,23 +636,20 @@ async fn prune_unstaked_connections_and_add_new_connection( client_connection_tracker: ClientConnectionTracker, connection: Connection, connection_table: Arc>, - max_connections: usize, params: &NewConnectionHandlerParams, - wait_for_chunk_timeout: Duration, stream_load_ema: Arc, ) -> Result<(), ConnectionHandlerError> { let stats = params.stats.clone(); - if max_connections > 0 { + if params.max_connections > 0 { let connection_table_clone = connection_table.clone(); let mut connection_table = connection_table.lock().await; - prune_unstaked_connection_table(&mut connection_table, max_connections, stats); + prune_unstaked_connection_table(&mut connection_table, params.max_connections, stats); handle_and_cache_new_connection( client_connection_tracker, connection, connection_table, connection_table_clone, params, - wait_for_chunk_timeout, stream_load_ema, ) } else { @@ -688,14 +713,10 @@ async fn setup_connection( unstaked_connection_table: Arc>, staked_connection_table: Arc>, packet_sender: Sender, - max_connections_per_peer: usize, staked_nodes: Arc>, - max_staked_connections: usize, - max_unstaked_connections: usize, - max_streams_per_ms: u64, stats: Arc, - wait_for_chunk_timeout: Duration, stream_load_ema: Arc, + quic_server_params: QuicServerParams, ) { const PRUNE_RANDOM_SAMPLE_SIZE: usize = 2; let from = connecting.remote_address(); @@ -738,14 +759,18 @@ async fn setup_connection( let params = get_connection_stake(&new_connection, &staked_nodes).map_or( NewConnectionHandlerParams::new_unstaked( packet_sender.clone(), - max_connections_per_peer, + 
quic_server_params.max_connections_per_peer, stats.clone(), + quic_server_params.wait_for_chunk_timeout, + quic_server_params.max_unstaked_connections, ), |(pubkey, stake, total_stake, max_stake, min_stake)| { // The heuristic is that the stake should be large engouh to have 1 stream pass throuh within one throttle // interval during which we allow max (MAX_STREAMS_PER_MS * STREAM_THROTTLING_INTERVAL_MS) streams. - let min_stake_ratio = - 1_f64 / (max_streams_per_ms * STREAM_THROTTLING_INTERVAL_MS) as f64; + let min_stake_ratio = 1_f64 + / (quic_server_params.max_streams_per_ms + * STREAM_THROTTLING_INTERVAL_MS) + as f64; let stake_ratio = stake as f64 / total_stake as f64; let peer_type = if stake_ratio < min_stake_ratio { // If it is a staked connection with ultra low stake ratio, treat it as unstaked. @@ -758,10 +783,12 @@ async fn setup_connection( remote_pubkey: Some(pubkey), peer_type, total_stake, - max_connections_per_peer, + max_connections_per_peer: quic_server_params.max_connections_per_peer, stats: stats.clone(), max_stake, min_stake, + wait_for_chunk_timeout: quic_server_params.wait_for_chunk_timeout, + max_connections: quic_server_params.max_staked_connections, } }, ); @@ -770,20 +797,23 @@ async fn setup_connection( ConnectionPeerType::Staked(stake) => { let mut connection_table_l = staked_connection_table.lock().await; - if connection_table_l.total_size >= max_staked_connections { + if connection_table_l.total_size + >= quic_server_params.max_staked_connections + { let num_pruned = connection_table_l.prune_random(PRUNE_RANDOM_SAMPLE_SIZE, stake); stats.num_evictions.fetch_add(num_pruned, Ordering::Relaxed); + update_open_connections_stat(&stats, &connection_table_l); } - if connection_table_l.total_size < max_staked_connections { + if connection_table_l.total_size < quic_server_params.max_staked_connections + { if let Ok(()) = handle_and_cache_new_connection( client_connection_tracker, new_connection, connection_table_l, 
staked_connection_table.clone(), ¶ms, - wait_for_chunk_timeout, stream_load_ema.clone(), ) { stats @@ -798,9 +828,7 @@ async fn setup_connection( client_connection_tracker, new_connection, unstaked_connection_table.clone(), - max_unstaked_connections, ¶ms, - wait_for_chunk_timeout, stream_load_ema.clone(), ) .await @@ -823,9 +851,7 @@ async fn setup_connection( client_connection_tracker, new_connection, unstaked_connection_table.clone(), - max_unstaked_connections, ¶ms, - wait_for_chunk_timeout, stream_load_ema.clone(), ) .await @@ -892,12 +918,12 @@ fn handle_connection_error(e: quinn::ConnectionError, stats: &StreamerStats, fro // Holder(s) of the Sender on the other end should not // wait for this function to exit -fn packet_batch_sender( +fn run_packet_batch_sender( packet_sender: Sender, packet_receiver: Receiver, - exit: Arc, stats: Arc, coalesce: Duration, + cancel: CancellationToken, ) { trace!("enter packet_batch_sender"); let mut batch_start_time = Instant::now(); @@ -914,7 +940,7 @@ fn packet_batch_sender( .fetch_add(PACKETS_PER_BATCH, Ordering::Relaxed); loop { - if exit.load(Ordering::Relaxed) { + if cancel.is_cancelled() { return; } let elapsed = batch_start_time.elapsed(); @@ -932,7 +958,7 @@ fn packet_batch_sender( // The downstream channel is disconnected, this error is not recoverable. 
if matches!(e, TrySendError::Disconnected(_)) { - exit.store(true, Ordering::Relaxed); + cancel.cancel(); return; } } else { @@ -1046,11 +1072,10 @@ async fn handle_connection( remote_addr: SocketAddr, last_update: Arc, connection_table: Arc>, - cancel: CancellationToken, params: NewConnectionHandlerParams, - wait_for_chunk_timeout: Duration, stream_load_ema: Arc, stream_counter: Arc, + cancel: CancellationToken, ) { let NewConnectionHandlerParams { packet_sender, @@ -1064,7 +1089,7 @@ async fn handle_connection( debug!( "quic new connection {} streams: {} connections: {}", remote_addr, - stats.total_streams.load(Ordering::Relaxed), + stats.active_streams.load(Ordering::Relaxed), stats.total_connections.load(Ordering::Relaxed), ); stats.total_connections.fetch_add(1, Ordering::Relaxed); @@ -1119,7 +1144,7 @@ async fn handle_connection( } stream_load_ema.increment_load(peer_type); stream_counter.stream_count.fetch_add(1, Ordering::Relaxed); - stats.total_streams.fetch_add(1, Ordering::Relaxed); + stats.active_streams.fetch_add(1, Ordering::Relaxed); stats.total_new_streams.fetch_add(1, Ordering::Relaxed); let mut meta = Meta::default(); @@ -1143,7 +1168,7 @@ async fn handle_connection( // packet loss or the peer stops sending for whatever reason. let n_chunks = match tokio::select! { chunk = tokio::time::timeout( - wait_for_chunk_timeout, + params.wait_for_chunk_timeout, stream.read_chunks(&mut chunks)) => chunk, // If the peer gets disconnected stop the task right away. @@ -1176,9 +1201,7 @@ async fn handle_connection( &packet_sender, &stats, peer_type, - ) - .await - { + ) { // The stream is finished, break out of the loop and close the stream. 
Ok(StreamState::Finished) => { last_update.store(timing::timestamp(), Ordering::Relaxed); @@ -1192,23 +1215,29 @@ async fn handle_connection( CONNECTION_CLOSE_CODE_INVALID_STREAM.into(), CONNECTION_CLOSE_REASON_INVALID_STREAM, ); - stats.total_streams.fetch_sub(1, Ordering::Relaxed); + stats.active_streams.fetch_sub(1, Ordering::Relaxed); stream_load_ema.update_ema_if_needed(); break 'conn; } } } - stats.total_streams.fetch_sub(1, Ordering::Relaxed); + stats.active_streams.fetch_sub(1, Ordering::Relaxed); stream_load_ema.update_ema_if_needed(); } let stable_id = connection.stable_id(); - let removed_connection_count = connection_table.lock().await.remove_connection( - ConnectionTableKey::new(remote_addr.ip(), remote_pubkey), - remote_addr.port(), - stable_id, - ); + let removed_connection_count = { + let mut connection_table = connection_table.lock().await; + let removed_connection_count = connection_table.remove_connection( + ConnectionTableKey::new(remote_addr.ip(), remote_pubkey), + remote_addr.port(), + stable_id, + ); + update_open_connections_stat(&stats, &connection_table); + removed_connection_count + }; + if removed_connection_count > 0 { stats .connection_removed @@ -1232,7 +1261,7 @@ enum StreamState { // packet sender. // // Returns Err(()) if the stream is invalid. 
-async fn handle_chunks( +fn handle_chunks( chunks: impl ExactSizeIterator, accum: &mut PacketAccumulator, packet_sender: &Sender, @@ -1401,18 +1430,31 @@ impl ConnectionTableKey { struct ConnectionTable { table: IndexMap>, total_size: usize, + is_staked: bool, + cancel: CancellationToken, } -// Prune the connection which has the oldest update -// Return number pruned +/// Prune the connection which has the oldest update +/// +/// Return number pruned impl ConnectionTable { - fn new() -> Self { + fn new(is_staked: bool, cancel: CancellationToken) -> Self { Self { table: IndexMap::default(), total_size: 0, + is_staked, + cancel, } } + fn table_size(&self) -> usize { + self.total_size + } + + fn is_staked(&self) -> bool { + self.is_staked + } + fn prune_oldest(&mut self, max_size: usize) -> usize { let mut num_pruned = 0; let key = |(_, connections): &(_, &Vec<_>)| { @@ -1478,7 +1520,7 @@ impl ConnectionTable { .map(|c| c <= max_connections_per_peer) .unwrap_or(false); if has_connection_capacity { - let cancel = CancellationToken::new(); + let cancel = self.cancel.child_token(); let last_update = Arc::new(AtomicU64::new(last_update)); let stream_counter = connection_entry .first() @@ -1686,12 +1728,12 @@ pub mod test { async fn test_quic_server_exit() { let SpawnTestServerResult { join_handle, - exit, receiver: _, server_address: _, stats: _, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); } @@ -1700,14 +1742,14 @@ pub mod test { solana_logger::setup(); let SpawnTestServerResult { join_handle, - exit, receiver, server_address, stats: _, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); check_timeout(receiver, server_address).await; - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); } @@ -1716,18 +1758,18 @@ pub mod test { solana_logger::setup(); let (pkt_batch_sender, pkt_batch_receiver) = 
unbounded(); let (ptk_sender, pkt_receiver) = unbounded(); - let exit = Arc::new(AtomicBool::new(false)); + let cancel = CancellationToken::new(); let stats = Arc::new(StreamerStats::default()); - let handle = thread::spawn({ - let exit = exit.clone(); + let handle = task::spawn_blocking({ + let cancel = cancel.clone(); move || { - packet_batch_sender( + run_packet_batch_sender( pkt_batch_sender, pkt_receiver, - exit, stats, DEFAULT_TPU_COALESCE, + cancel, ); } }); @@ -1756,10 +1798,10 @@ pub mod test { } } assert_eq!(i, num_packets); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); // Explicit drop to wake up packet_batch_sender drop(ptk_sender); - handle.join().unwrap(); + handle.await.unwrap(); } #[tokio::test(flavor = "multi_thread")] @@ -1767,14 +1809,14 @@ pub mod test { solana_logger::setup(); let SpawnTestServerResult { join_handle, - exit, receiver: _, server_address, stats, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); let conn1 = make_client_endpoint(&server_address, None).await; - assert_eq!(stats.total_streams.load(Ordering::Relaxed), 0); + assert_eq!(stats.active_streams.load(Ordering::Relaxed), 0); assert_eq!(stats.total_stream_read_timeouts.load(Ordering::Relaxed), 0); // Send one byte to start the stream @@ -1786,14 +1828,14 @@ pub mod test { sleep(sleep_time).await; // Test that the stream was created, but timed out in read - assert_eq!(stats.total_streams.load(Ordering::Relaxed), 0); + assert_eq!(stats.active_streams.load(Ordering::Relaxed), 0); assert_ne!(stats.total_stream_read_timeouts.load(Ordering::Relaxed), 0); // Test that more writes to the stream will fail (i.e. 
the stream is no longer writable // after the timeouts) assert!(s1.write_all(&[0u8]).await.is_err()); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); } @@ -1802,13 +1844,13 @@ pub mod test { solana_logger::setup(); let SpawnTestServerResult { join_handle, - exit, receiver: _, server_address, stats: _, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); check_block_multiple_connections(server_address).await; - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); } @@ -1818,10 +1860,10 @@ pub mod test { let SpawnTestServerResult { join_handle, - exit, receiver: _, server_address, stats, + cancel, } = setup_quic_server( None, QuicServerParams { @@ -1884,7 +1926,7 @@ pub mod test { } assert!(start.elapsed().as_secs() < 1); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); } @@ -1893,13 +1935,13 @@ pub mod test { solana_logger::setup(); let SpawnTestServerResult { join_handle, - exit, receiver, server_address, stats: _, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); check_multiple_writes(receiver, server_address, None).await; - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); } @@ -1915,13 +1957,13 @@ pub mod test { ); let SpawnTestServerResult { join_handle, - exit, receiver, server_address, stats, + cancel, } = setup_quic_server(Some(staked_nodes), QuicServerParams::default_for_tests()); check_multiple_writes(receiver, server_address, Some(&client_keypair)).await; - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); sleep(Duration::from_millis(100)).await; assert_eq!( @@ -1947,13 +1989,13 @@ pub mod test { ); let SpawnTestServerResult { join_handle, - exit, receiver, server_address, stats, + cancel, } = setup_quic_server(Some(staked_nodes), QuicServerParams::default_for_tests()); check_multiple_writes(receiver, server_address, 
Some(&client_keypair)).await; - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); sleep(Duration::from_millis(100)).await; assert_eq!( @@ -1971,13 +2013,13 @@ pub mod test { solana_logger::setup(); let SpawnTestServerResult { join_handle, - exit, receiver, server_address, stats, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); check_multiple_writes(receiver, server_address, None).await; - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); sleep(Duration::from_millis(100)).await; assert_eq!( @@ -1994,32 +2036,32 @@ pub mod test { async fn test_quic_server_unstaked_node_connect_failure() { solana_logger::setup(); let s = bind_to_localhost_unique().expect("should bind"); - let exit = Arc::new(AtomicBool::new(false)); let (sender, _) = unbounded(); let keypair = Keypair::new(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); + let cancel = CancellationToken::new(); let SpawnNonBlockingServerResult { endpoints: _, stats: _, thread: t, max_concurrent_connections: _, - } = spawn_server( + } = spawn_server_with_cancel( "quic_streamer_test", - s, + [s], &keypair, sender, - exit.clone(), staked_nodes, QuicServerParams { max_unstaked_connections: 0, // Do not allow any connection from unstaked clients/nodes ..QuicServerParams::default_for_tests() }, + cancel.clone(), ) .unwrap(); check_unstaked_node_connect_failure(server_address).await; - exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.await.unwrap(); } @@ -2027,37 +2069,41 @@ pub mod test { async fn test_quic_server_multiple_streams() { solana_logger::setup(); let s = bind_to_localhost_unique().expect("should bind"); - let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); + let cancel = 
CancellationToken::new(); let SpawnNonBlockingServerResult { endpoints: _, stats, thread: t, max_concurrent_connections: _, - } = spawn_server( + } = spawn_server_with_cancel( "quic_streamer_test", - s, + [s], &keypair, sender, - exit.clone(), staked_nodes, QuicServerParams { max_connections_per_peer: 2, ..QuicServerParams::default_for_tests() }, + cancel.clone(), ) .unwrap(); check_multiple_streams(receiver, server_address, None).await; - assert_eq!(stats.total_streams.load(Ordering::Relaxed), 0); + assert_eq!(stats.active_streams.load(Ordering::Relaxed), 0); assert_eq!(stats.total_new_streams.load(Ordering::Relaxed), 20); assert_eq!(stats.total_connections.load(Ordering::Relaxed), 2); assert_eq!(stats.total_new_connections.load(Ordering::Relaxed), 2); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.await.unwrap(); + // handle of the streamer doesn't wait for the child task to finish, so + // it is not deterministic if the tasks handling connections exit before + // the assertion below or after. + sleep(Duration::from_millis(100)).await; assert_eq!(stats.total_connections.load(Ordering::Relaxed), 0); assert_eq!(stats.total_new_connections.load(Ordering::Relaxed), 2); } @@ -2066,7 +2112,8 @@ pub mod test { fn test_prune_table_with_ip() { use std::net::Ipv4Addr; solana_logger::setup(); - let mut table = ConnectionTable::new(); + let cancel = CancellationToken::new(); + let mut table = ConnectionTable::new(false, cancel); let mut num_entries = 5; let max_connections_per_peer = 10; let sockets: Vec<_> = (0..num_entries) @@ -2119,7 +2166,8 @@ pub mod test { #[test] fn test_prune_table_with_unique_pubkeys() { solana_logger::setup(); - let mut table = ConnectionTable::new(); + let cancel = CancellationToken::new(); + let mut table = ConnectionTable::new(false, cancel); // We should be able to add more entries than max_connections_per_peer, since each entry is // from a different peer pubkey. 
@@ -2157,7 +2205,8 @@ pub mod test { #[test] fn test_prune_table_with_non_unique_pubkeys() { solana_logger::setup(); - let mut table = ConnectionTable::new(); + let cancel = CancellationToken::new(); + let mut table = ConnectionTable::new(false, cancel); let max_connections_per_peer = 10; let pubkey = Pubkey::new_unique(); @@ -2223,7 +2272,9 @@ pub mod test { fn test_prune_table_random() { use std::net::Ipv4Addr; solana_logger::setup(); - let mut table = ConnectionTable::new(); + let cancel = CancellationToken::new(); + let mut table = ConnectionTable::new(false, cancel); + let num_entries = 5; let max_connections_per_peer = 10; let sockets: Vec<_> = (0..num_entries) @@ -2265,7 +2316,9 @@ pub mod test { fn test_remove_connections() { use std::net::Ipv4Addr; solana_logger::setup(); - let mut table = ConnectionTable::new(); + let cancel = CancellationToken::new(); + let mut table = ConnectionTable::new(false, cancel); + let num_ips = 5; let max_connections_per_peer = 10; let mut sockets: Vec<_> = (0..num_ips) @@ -2394,10 +2447,10 @@ pub mod test { let SpawnTestServerResult { join_handle, - exit, receiver, server_address, stats, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); let client_connection = make_client_endpoint(&server_address, None).await; @@ -2427,7 +2480,7 @@ pub mod test { assert_eq!(expected_num_txs, num_txs_received); // stop it - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); assert_eq!( @@ -2455,7 +2508,7 @@ pub mod test { join_handle, server_address, stats, - exit, + cancel, .. 
} = setup_quic_server(None, QuicServerParams::default_for_tests()); @@ -2474,7 +2527,7 @@ pub mod test { _ => panic!("unexpected close"), } assert_eq!(stats.invalid_stream_size.load(Ordering::Relaxed), 1); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); join_handle.await.unwrap(); } } diff --git a/streamer/src/nonblocking/testing_utilities.rs b/streamer/src/nonblocking/testing_utilities.rs index 2401b617c53a8a..5d130e2cada878 100644 --- a/streamer/src/nonblocking/testing_utilities.rs +++ b/streamer/src/nonblocking/testing_utilities.rs @@ -1,7 +1,8 @@ //! Contains utility functions to create server and client for test purposes. use { - super::quic::{spawn_server_multi, SpawnNonBlockingServerResult, ALPN_TPU_PROTOCOL_ID}, + super::quic::{SpawnNonBlockingServerResult, ALPN_TPU_PROTOCOL_ID}, crate::{ + nonblocking::quic::spawn_server_with_cancel, quic::{QuicServerParams, StreamerStats}, streamer::StakedNodes, }, @@ -20,10 +21,11 @@ use { solana_tls_utils::{new_dummy_x509_certificate, tls_client_config_builder}, std::{ net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, - sync::{atomic::AtomicBool, Arc, RwLock}, + sync::{Arc, RwLock}, time::{Duration, Instant}, }, tokio::{task::JoinHandle, time::sleep}, + tokio_util::sync::CancellationToken, }; pub fn get_client_config(keypair: &Keypair) -> ClientConfig { @@ -50,10 +52,10 @@ pub fn get_client_config(keypair: &Keypair) -> ClientConfig { pub struct SpawnTestServerResult { pub join_handle: JoinHandle<()>, - pub exit: Arc, pub receiver: crossbeam_channel::Receiver, pub server_address: SocketAddr, pub stats: Arc, + pub cancel: CancellationToken, } pub fn create_quic_server_sockets() -> Vec { @@ -86,33 +88,33 @@ pub fn setup_quic_server_with_sockets( option_staked_nodes: Option, quic_server_params: QuicServerParams, ) -> SpawnTestServerResult { - let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); let server_address = sockets[0].local_addr().unwrap(); let 
staked_nodes = Arc::new(RwLock::new(option_staked_nodes.unwrap_or_default())); + let cancel = CancellationToken::new(); let SpawnNonBlockingServerResult { endpoints: _, stats, thread: handle, max_concurrent_connections: _, - } = spawn_server_multi( + } = spawn_server_with_cancel( "quic_streamer_test", sockets, &keypair, sender, - exit.clone(), staked_nodes, quic_server_params, + cancel.clone(), ) .unwrap(); SpawnTestServerResult { join_handle: handle, - exit, receiver, server_address, stats, + cancel, } } diff --git a/streamer/src/packet.rs b/streamer/src/packet.rs index 69beb2eae42747..3d89a2cc845f35 100644 --- a/streamer/src/packet.rs +++ b/streamer/src/packet.rs @@ -61,14 +61,14 @@ pub(crate) fn recv_from( } } Err(e) => { - trace!("recv_from err {:?}", e); + trace!("recv_from err {e:?}"); return Err(e); } Ok(npkts) => { if i == 0 { socket.set_nonblocking(true)?; } - trace!("got {} packets", npkts); + trace!("got {npkts} packets"); i += npkts; // Try to batch into big enough buffers // will cause less re-shuffling later on. 
diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index 6f1156760f6ff4..7e1301aec81011 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -24,10 +24,11 @@ use { atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, Mutex, RwLock, }, - thread, + thread::{self}, time::Duration, }, tokio::runtime::Runtime, + tokio_util::sync::CancellationToken, }; // allow multiple connections for NAT and any open/close overlap @@ -115,7 +116,7 @@ pub(crate) fn configure_server( Ok((server_config, cert_chain_pem)) } -pub fn rt(name: String, num_threads: NonZeroUsize) -> Runtime { +fn rt(name: String, num_threads: NonZeroUsize) -> Runtime { tokio::runtime::Builder::new_multi_thread() .thread_name(name) .worker_threads(num_threads.get()) @@ -152,12 +153,11 @@ impl NotifyKeyUpdate for EndpointKeyUpdater { pub struct StreamerStats { pub(crate) total_connections: AtomicUsize, pub(crate) total_new_connections: AtomicUsize, - pub(crate) total_streams: AtomicUsize, + pub(crate) active_streams: AtomicUsize, pub(crate) total_new_streams: AtomicUsize, pub(crate) invalid_stream_size: AtomicUsize, pub(crate) total_packets_allocated: AtomicUsize, pub(crate) total_packet_batches_allocated: AtomicUsize, - pub(crate) total_chunks_received: AtomicUsize, pub(crate) total_staked_chunks_received: AtomicUsize, pub(crate) total_unstaked_chunks_received: AtomicUsize, pub(crate) total_packet_batch_send_err: AtomicUsize, @@ -211,6 +211,8 @@ pub struct StreamerStats { pub(crate) connection_rate_limiter_length: AtomicUsize, // All connections in various states such as Incoming, Connecting, Connection pub(crate) open_connections: AtomicUsize, + pub(crate) open_staked_connections: AtomicUsize, + pub(crate) open_unstaked_connections: AtomicUsize, pub(crate) refused_connections_too_many_open_connections: AtomicUsize, pub(crate) outstanding_incoming_connection_attempts: AtomicUsize, pub(crate) total_incoming_connection_attempts: AtomicUsize, @@ -235,7 +237,7 @@ impl StreamerStats { ), ( 
"active_streams", - self.total_streams.load(Ordering::Relaxed), + self.active_streams.load(Ordering::Relaxed), i64 ), ( @@ -424,11 +426,6 @@ impl StreamerStats { .swap(0, Ordering::Relaxed), i64 ), - ( - "chunks_received", - self.total_chunks_received.swap(0, Ordering::Relaxed), - i64 - ), ( "staked_chunks_received", self.total_staked_chunks_received.swap(0, Ordering::Relaxed), @@ -572,6 +569,16 @@ impl StreamerStats { self.open_connections.load(Ordering::Relaxed), i64 ), + ( + "open_staked_connections", + self.open_staked_connections.load(Ordering::Relaxed), + i64 + ), + ( + "open_unstaked_connections", + self.open_unstaked_connections.load(Ordering::Relaxed), + i64 + ), ( "refused_connections_too_many_open_connections", self.refused_connections_too_many_open_connections @@ -582,20 +589,22 @@ impl StreamerStats { } } -pub fn spawn_server( +#[deprecated(since = "3.0.0", note = "Use spawn_server_with_cancel instead")] +pub fn spawn_server_multi( thread_name: &'static str, metrics_name: &'static str, - socket: UdpSocket, + sockets: Vec, keypair: &Keypair, packet_sender: Sender, exit: Arc, staked_nodes: Arc>, quic_server_params: QuicServerParams, ) -> Result { - spawn_server_multi( + #[allow(deprecated)] + spawn_server( thread_name, metrics_name, - vec![socket], + sockets, keypair, packet_sender, exit, @@ -633,10 +642,11 @@ impl Default for QuicServerParams { } } -#[cfg(feature = "dev-context-only-utils")] impl QuicServerParams { + #[cfg(feature = "dev-context-only-utils")] pub const DEFAULT_NUM_SERVER_THREADS_FOR_TEST: NonZeroUsize = NonZeroUsize::new(8).unwrap(); + #[cfg(feature = "dev-context-only-utils")] pub fn default_for_tests() -> Self { // Shrink the channel size to avoid a massive allocation for tests Self { @@ -645,29 +655,69 @@ impl QuicServerParams { ..Self::default() } } + + pub(crate) fn max_concurrent_connections(&self) -> usize { + let conns = self.max_staked_connections + self.max_unstaked_connections; + conns + conns / 4 + } } -pub fn 
spawn_server_multi( +#[deprecated(since = "3.1.0", note = "Use spawn_server_with_cancel instead")] +pub fn spawn_server( thread_name: &'static str, metrics_name: &'static str, - sockets: Vec, + sockets: impl IntoIterator, keypair: &Keypair, packet_sender: Sender, exit: Arc, staked_nodes: Arc>, quic_server_params: QuicServerParams, +) -> Result { + let cancel = CancellationToken::new(); + thread::spawn({ + let cancel = cancel.clone(); + move || loop { + if exit.load(Ordering::Relaxed) { + cancel.cancel(); + break; + } + thread::sleep(Duration::from_millis(100)); + } + }); + spawn_server_with_cancel( + thread_name, + metrics_name, + sockets, + keypair, + packet_sender, + staked_nodes, + quic_server_params, + cancel, + ) +} + +/// Spawns a tokio runtime and a streamer instance inside it. +pub fn spawn_server_with_cancel( + thread_name: &'static str, + metrics_name: &'static str, + sockets: impl IntoIterator, + keypair: &Keypair, + packet_sender: Sender, + staked_nodes: Arc>, + quic_server_params: QuicServerParams, + cancel: CancellationToken, ) -> Result { let runtime = rt(format!("{thread_name}Rt"), quic_server_params.num_threads); let result = { let _guard = runtime.enter(); - crate::nonblocking::quic::spawn_server_multi( + crate::nonblocking::quic::spawn_server_with_cancel( metrics_name, sockets, keypair, packet_sender, - exit, staked_nodes, quic_server_params, + cancel, ) }?; let handle = thread::Builder::new() @@ -707,59 +757,59 @@ mod test { fn setup_quic_server() -> ( std::thread::JoinHandle<()>, - Arc, crossbeam_channel::Receiver, SocketAddr, + CancellationToken, ) { let s = bind_to_localhost_unique().expect("should bind"); - let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); + let cancel = CancellationToken::new(); let SpawnServerResult { endpoints: _, thread: t, key_updater: _, - } = 
spawn_server( + } = spawn_server_with_cancel( "solQuicTest", "quic_streamer_test", - s, + [s], &keypair, sender, - exit.clone(), staked_nodes, QuicServerParams::default_for_tests(), + cancel.clone(), ) .unwrap(); - (t, exit, receiver, server_address) + (t, receiver, server_address, cancel) } #[test] fn test_quic_server_exit() { - let (t, exit, _receiver, _server_address) = setup_quic_server(); - exit.store(true, Ordering::Relaxed); + let (t, _receiver, _server_address, cancel) = setup_quic_server(); + cancel.cancel(); t.join().unwrap(); } #[test] fn test_quic_timeout() { solana_logger::setup(); - let (t, exit, receiver, server_address) = setup_quic_server(); + let (t, receiver, server_address, cancel) = setup_quic_server(); let runtime = rt_for_test(); runtime.block_on(check_timeout(receiver, server_address)); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.join().unwrap(); } #[test] fn test_quic_server_block_multiple_connections() { solana_logger::setup(); - let (t, exit, _receiver, server_address) = setup_quic_server(); + let (t, _receiver, server_address, cancel) = setup_quic_server(); let runtime = rt_for_test(); runtime.block_on(check_block_multiple_connections(server_address)); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.join().unwrap(); } @@ -767,44 +817,44 @@ mod test { fn test_quic_server_multiple_streams() { solana_logger::setup(); let s = bind_to_localhost_unique().expect("should bind"); - let exit = Arc::new(AtomicBool::new(false)); let (sender, receiver) = unbounded(); let keypair = Keypair::new(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); + let cancel = CancellationToken::new(); let SpawnServerResult { endpoints: _, thread: t, key_updater: _, - } = spawn_server( + } = spawn_server_with_cancel( "solQuicTest", "quic_streamer_test", - s, + [s], &keypair, sender, - exit.clone(), staked_nodes, QuicServerParams { max_connections_per_peer: 2, 
..QuicServerParams::default_for_tests() }, + cancel.clone(), ) .unwrap(); let runtime = rt_for_test(); runtime.block_on(check_multiple_streams(receiver, server_address, None)); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.join().unwrap(); } #[test] fn test_quic_server_multiple_writes() { solana_logger::setup(); - let (t, exit, receiver, server_address) = setup_quic_server(); + let (t, receiver, server_address, cancel) = setup_quic_server(); let runtime = rt_for_test(); runtime.block_on(check_multiple_writes(receiver, server_address, None)); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.join().unwrap(); } @@ -812,33 +862,33 @@ mod test { fn test_quic_server_unstaked_node_connect_failure() { solana_logger::setup(); let s = bind_to_localhost_unique().expect("should bind"); - let exit = Arc::new(AtomicBool::new(false)); let (sender, _) = unbounded(); let keypair = Keypair::new(); let server_address = s.local_addr().unwrap(); let staked_nodes = Arc::new(RwLock::new(StakedNodes::default())); + let cancel = CancellationToken::new(); let SpawnServerResult { endpoints: _, thread: t, key_updater: _, - } = spawn_server( + } = spawn_server_with_cancel( "solQuicTest", "quic_streamer_test", - s, + [s], &keypair, sender, - exit.clone(), staked_nodes, QuicServerParams { max_unstaked_connections: 0, ..QuicServerParams::default_for_tests() }, + cancel.clone(), ) .unwrap(); let runtime = rt_for_test(); runtime.block_on(check_unstaked_node_connect_failure(server_address)); - exit.store(true, Ordering::Relaxed); + cancel.cancel(); t.join().unwrap(); } } diff --git a/streamer/src/recvmmsg.rs b/streamer/src/recvmmsg.rs index 2a48aa06cc2ccc..f8ee20114ec1cd 100644 --- a/streamer/src/recvmmsg.rs +++ b/streamer/src/recvmmsg.rs @@ -183,7 +183,7 @@ mod tests { use { crate::{packet::PACKET_DATA_SIZE, recvmmsg::*}, solana_net_utils::sockets::{ - bind_in_range_with_config, localhost_port_range_for_tests, + bind_in_range_with_config, localhost_port_range_for_tests, 
unique_port_range_for_tests, SocketConfiguration as SocketConfig, }, std::{ @@ -195,10 +195,20 @@ mod tests { type TestConfig = (UdpSocket, SocketAddr, UdpSocket, SocketAddr); fn test_setup_reader_sender(ip: IpAddr) -> io::Result { - let port_range = localhost_port_range_for_tests(); - let reader = bind_in_range_with_config(ip, port_range, SocketConfig::default())?.1; + let port_range = unique_port_range_for_tests(2); + let reader = bind_in_range_with_config( + ip, + (port_range.start, port_range.end), + SocketConfig::default(), + )? + .1; let reader_addr = reader.local_addr()?; - let sender = bind_in_range_with_config(ip, port_range, SocketConfig::default())?.1; + let sender = bind_in_range_with_config( + ip, + (port_range.start, port_range.end), + SocketConfig::default(), + )? + .1; let sender_addr = sender.local_addr()?; Ok((reader, reader_addr, sender, sender_addr)) } diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 1b740901c45457..00b3d71ce95a90 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -3,10 +3,6 @@ use { crate::{ - atomic_udp_socket::{ - AtomicSocketProvider, AtomicUdpSocket, CurrentSocket, FixedSocketProvider, - SocketProvider, - }, packet::{ self, PacketBatch, PacketBatchRecycler, PacketRef, PinnedPacketBatch, PACKETS_PER_BATCH, }, @@ -16,6 +12,9 @@ use { crossbeam_channel::{Receiver, RecvTimeoutError, SendError, Sender, TrySendError}, histogram::Histogram, itertools::Itertools, + solana_net_utils::multihomed_sockets::{ + BindIpAddrs, CurrentSocket, FixedSocketProvider, MultihomedSocketProvider, SocketProvider, + }, solana_packet::Packet, solana_pubkey::Pubkey, solana_time_utils::timestamp, @@ -182,7 +181,7 @@ fn recv_loop( } let mut socket = provider.current_socket_ref(); - setup_socket(&socket)?; + setup_socket(socket)?; #[cfg(unix)] let mut poll_fd = [PollFd::new(socket.as_fd(), PollFlags::POLLIN)]; @@ -209,9 +208,9 @@ fn recv_loop( } #[cfg(unix)] - let result = packet::recv_from(&mut packet_batch, 
&socket, coalesce, &mut poll_fd); + let result = packet::recv_from(&mut packet_batch, socket, coalesce, &mut poll_fd); #[cfg(not(unix))] - let result = packet::recv_from(&mut packet_batch, &socket, coalesce); + let result = packet::recv_from(&mut packet_batch, socket, coalesce); if let Ok(len) = result { if len > 0 { @@ -248,7 +247,7 @@ fn recv_loop( if let CurrentSocket::Changed(s) = provider.current_socket() { socket = s; - setup_socket(&socket)?; + setup_socket(socket)?; #[cfg(unix)] { @@ -293,7 +292,8 @@ pub fn receiver( #[allow(clippy::too_many_arguments)] pub fn receiver_atomic( thread_name: String, - socket: Arc, + sockets: Arc<[UdpSocket]>, + bind_ip_addrs: Arc, exit: Arc, packet_batch_sender: impl ChannelSend, recycler: PacketBatchRecycler, @@ -306,7 +306,7 @@ pub fn receiver_atomic( Builder::new() .name(thread_name) .spawn(move || { - let mut provider = AtomicSocketProvider::new(socket); + let mut provider = MultihomedSocketProvider::new(sockets, bind_ip_addrs); let _ = recv_loop( &mut provider, &exit, @@ -536,7 +536,8 @@ pub fn recv_packet_batches( pub fn responder_atomic( name: &'static str, - sock: Arc, + sockets: Arc<[UdpSocket]>, + bind_ip_addrs: Arc, r: PacketBatchReceiver, socket_addr_space: SocketAddrSpace, stats_reporter_sender: Option>>, @@ -545,7 +546,7 @@ pub fn responder_atomic( .name(format!("solRspndr{name}")) .spawn(move || { responder_loop( - AtomicSocketProvider::new(sock), + MultihomedSocketProvider::new(sockets, bind_ip_addrs), name, r, socket_addr_space, @@ -594,7 +595,7 @@ fn responder_loop( loop { let sock = provider.current_socket_ref(); - if let Err(e) = recv_send(&sock, &r, &socket_addr_space, &mut stats) { + if let Err(e) = recv_send(sock, &r, &socket_addr_space, &mut stats) { match e { StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break, StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (), diff --git a/svm-callback/Cargo.toml b/svm-callback/Cargo.toml index f2a5b68f59aae1..c24bd164467c3e 100644 --- 
a/svm-callback/Cargo.toml +++ b/svm-callback/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-svm-callback" -version = "3.0.0" description = "Solana SVM callback" +version = { workspace = true } authors = { workspace = true } repository = { workspace = true } homepage = { workspace = true } @@ -11,6 +11,7 @@ readme = false [dependencies] solana-account = { workspace = true } +solana-clock = { workspace = true } solana-precompile-error = { workspace = true } solana-pubkey = { workspace = true } diff --git a/svm-callback/src/lib.rs b/svm-callback/src/lib.rs index 016fff9beca25c..57c7afa2df5a4c 100644 --- a/svm-callback/src/lib.rs +++ b/svm-callback/src/lib.rs @@ -1,6 +1,6 @@ use { - solana_account::AccountSharedData, solana_precompile_error::PrecompileError, - solana_pubkey::Pubkey, + solana_account::AccountSharedData, solana_clock::Slot, + solana_precompile_error::PrecompileError, solana_pubkey::Pubkey, }; /// Callback used by InvokeContext in SVM @@ -33,23 +33,10 @@ pub trait InvokeContextCallback { /// Runtime callbacks for transaction processing. 
pub trait TransactionProcessingCallback: InvokeContextCallback { - fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option; - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; - - fn add_builtin_account(&self, _name: &str, _program_id: &Pubkey) {} + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)>; fn inspect_account(&self, _address: &Pubkey, _account_state: AccountState, _is_writable: bool) { } - - #[deprecated( - since = "2.3.0", - note = "Use `get_epoch_stake_for_vote_account` on the `InvokeContextCallback` trait \ - instead" - )] - fn get_current_epoch_vote_account_stake(&self, vote_address: &Pubkey) -> u64 { - Self::get_epoch_stake_for_vote_account(self, vote_address) - } } /// The state the account is in initially, before transaction processing diff --git a/svm-conformance/Cargo.toml b/svm-conformance/Cargo.toml deleted file mode 100644 index 78a2a3d8d870ea..00000000000000 --- a/svm-conformance/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "solana-svm-conformance" -description = "Solana SVM conformance" -documentation = "https://docs.rs/solana-svm-conformance" -version = { workspace = true } -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -edition = { workspace = true } - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -prost = { workspace = true } -prost-types = { workspace = true } - -[build-dependencies] -prost-build = { workspace = true } diff --git a/svm-conformance/build.rs b/svm-conformance/build.rs deleted file mode 100644 index 09f10dfc4cd342..00000000000000 --- a/svm-conformance/build.rs +++ /dev/null @@ -1,20 +0,0 @@ -fn main() { - let proto_base_path = std::path::PathBuf::from("proto"); - let protos = [ - "context.proto", - "invoke.proto", - "metadata.proto", - "txn.proto", - ]; - let protos_path: Vec<_> = protos - .iter() - .map(|name| 
proto_base_path.join(name)) - .collect(); - - protos_path - .iter() - .for_each(|proto| println!("cargo:rerun-if-changed={}", proto.display())); - - prost_build::compile_protos(protos_path.as_ref(), &[proto_base_path]) - .expect("Failed to compile protobuf files"); -} diff --git a/svm-conformance/proto/context.proto b/svm-conformance/proto/context.proto deleted file mode 100644 index d13568c6002924..00000000000000 --- a/svm-conformance/proto/context.proto +++ /dev/null @@ -1,61 +0,0 @@ -syntax = "proto3"; -package org.solana.sealevel.v1; - -// A set of feature flags. -message FeatureSet { - // Every item in this list marks an enabled feature. The value of - // each item is the first 8 bytes of the feature ID as a little- - // endian integer. - repeated fixed64 features = 1; -} - -// A seed address. This is not a PDA. -message SeedAddress { - // The seed address base. (32 bytes) - bytes base = 1; - - // The seed path (<= 32 bytes) - bytes seed = 2; - - // The seed address owner. (32 bytes) - bytes owner = 3; -} - -// The complete state of an account excluding its public key. -message AcctState { - // The account address. (32 bytes) - bytes address = 1; - - uint64 lamports = 2; - - // Account data is limited to 10 MiB on Solana mainnet as of 2024-Feb. - bytes data = 3; - - bool executable = 4; - - // The rent epoch is deprecated on Solana mainnet as of 2024-Feb. - // If ommitted, implies a value of UINT64_MAX. - uint64 rent_epoch = 5; - - // Address of the program that owns this account. (32 bytes) - bytes owner = 6; - - // The account address, but derived as a seed address. Overrides - // `address` if present. - // TODO: This is a solfuzz specific extension and is not compliant - // with the org.solana.sealevel.v1 API. - SeedAddress seed_addr = 7; -} - -// EpochContext includes context scoped to an epoch. -// On "real" ledgers, it is created during the epoch boundary. 
-message EpochContext { - FeatureSet features = 1; -} - -// SlotContext includes context scoped to a block. -// On "real" ledgers, it is created during the slot boundary. -message SlotContext { - // Slot number - fixed64 slot = 1; -} diff --git a/svm-conformance/proto/invoke.proto b/svm-conformance/proto/invoke.proto deleted file mode 100644 index 1ea2f383ff3a02..00000000000000 --- a/svm-conformance/proto/invoke.proto +++ /dev/null @@ -1,65 +0,0 @@ -syntax = "proto3"; -package org.solana.sealevel.v1; - -import "context.proto"; -import "metadata.proto"; - -message InstrAcct { - // Selects an account in an external list - uint32 index = 1; - bool is_writable = 2; - bool is_signer = 3; -} - -// The execution context of a program invocation (aka instruction). -// Contains all required information to independently replay an instruction. -// Also includes partial transaction context. -message InstrContext { - // The address of the program invoked. (32 bytes) - bytes program_id = 1; - - // Account state accessed by the instruction. This may include - // indirect accesses like sysvars. - repeated AcctState accounts = 3; - - // Account access list for this instruction (refers to above accounts list) - repeated InstrAcct instr_accounts = 4; - - // The input data passed to program execution. - bytes data = 5; - - uint64 cu_avail = 6; - - SlotContext slot_context = 8; - EpochContext epoch_context = 9; -} - -// The results of executing an InstrContext. -message InstrEffects { - // result is zero if the instruction executed succesfully. - // Otherwise, a non-zero error code. Error codes are not relevant to - // consensus. - int32 result = 1; - - // Some error cases additionally have a custom error code. Unlike - // the expected_result, this is stable across clients. - uint32 custom_err = 2; - - // Copies of accounts that were changed. May be in an arbitrary - // order. The pubkey of each account is unique in this list. 
Each - // account address modified here must also be in the - // InstrContext. - repeated AcctState modified_accounts = 3; - - uint64 cu_avail = 4; - - // Instruction return data. - bytes return_data = 5; -} - -// An instruction processing test fixture. -message InstrFixture { - FixtureMetadata metadata = 1; - InstrContext input = 2; - InstrEffects output = 3; -} diff --git a/svm-conformance/proto/metadata.proto b/svm-conformance/proto/metadata.proto deleted file mode 100644 index e790c0463d0974..00000000000000 --- a/svm-conformance/proto/metadata.proto +++ /dev/null @@ -1,7 +0,0 @@ -syntax = "proto3"; -package org.solana.sealevel.v1; - -// FixtureMetadata includes the metadata for the fixture -message FixtureMetadata { - string fn_entrypoint = 1; -} diff --git a/svm-conformance/proto/txn.proto b/svm-conformance/proto/txn.proto deleted file mode 100644 index 811f32dfc9ee89..00000000000000 --- a/svm-conformance/proto/txn.proto +++ /dev/null @@ -1,134 +0,0 @@ -syntax = "proto3"; -package org.solana.sealevel.v1; - -import "context.proto"; -import "metadata.proto"; - -// Message header contains the counts of required readonly and signatures -message MessageHeader { - uint32 num_required_signatures = 1; - uint32 num_readonly_signed_accounts = 2; - uint32 num_readonly_unsigned_accounts = 3; -} - -// The instruction a transaction executes -message CompiledInstruction { - // Index into the message pubkey array - uint32 program_id_index = 1; - // Indexes into the message pubkey array - repeated uint32 accounts = 2; - bytes data = 3; -} - -// List of address table lookups used to load additional accounts for a transaction -message MessageAddressTableLookup { - bytes account_key = 1; - repeated uint32 writable_indexes = 2; - repeated uint32 readonly_indexes = 3; -} - -// Message contains the transaction data -message TransactionMessage { - // Whether this is a legacy message or not - bool is_legacy = 1; - - MessageHeader header = 2; - - // Vector of pubkeys - repeated bytes 
account_keys = 3; - - // Data associated with the accounts referred above. Not all accounts need to be here. - repeated AcctState account_shared_data = 4; - - // Recent blockhash provided in message - bytes recent_blockhash = 5; - - // The instructions this transaction executes - repeated CompiledInstruction instructions = 6; - - // Not available in legacy message - repeated MessageAddressTableLookup address_table_lookups = 7; -} - -// A valid verified transaction -message SanitizedTransaction { - // The transaction information - TransactionMessage message = 1; - // The message hash - bytes message_hash = 2; - // Is this a voting transaction? - bool is_simple_vote_tx = 3; - // The signatures needed in the transaction - repeated bytes signatures = 4; -} - -// This Transaction context be used to fuzz either `load_execute_and_commit_transactions`, -// `load_and_execute_transactions` in `bank.rs` or `load_and_execute_sanitized_transactions` -// in `svm/transaction_processor.rs` -message TxnContext { - // The transaction data - SanitizedTransaction tx = 1; - - // Up to 300 (actually 301) most recent blockhashes (ordered from oldest to newest) - repeated bytes blockhash_queue = 3; - - EpochContext epoch_ctx = 4; - SlotContext slot_ctx = 5; -} - -// The resulting state of an account after a transaction -message ResultingState { - repeated AcctState acct_states = 1; - repeated RentDebits rent_debits = 2; - uint64 transaction_rent = 3; -} - -// The rent state for an account after a transaction -message RentDebits { - bytes pubkey = 1; - int64 rent_collected = 2; -} - -message FeeDetails { - uint64 transaction_fee = 1; - uint64 prioritization_fee = 2; -} - -// The execution results for a transaction -message TxnResult { - // Whether this transaction was executed - bool executed = 1; - // Whether there was a sanitization error - bool sanitization_error = 2; - // The state of each account after the transaction - ResultingState resulting_state = 3; - uint64 rent = 4; - - // If 
an executed transaction has no error - bool is_ok = 5; - // The transaction status (error code) - uint32 status = 6; - // The instruction error, if any - uint32 instruction_error = 7; - // The instruction error index, if any - uint32 instruction_error_index = 8; - // Custom error, if any - uint32 custom_error = 9; - - - // The return data from this transaction, if any - bytes return_data = 10; - // Number of executed compute units - uint64 executed_units = 11; - // The collected fees in this transaction - FeeDetails fee_details = 12; -} - -// Txn fixtures -message TxnFixture { - FixtureMetadata metadata = 1; - // Context - TxnContext input = 2; - // Effects - TxnResult output = 3; -} diff --git a/svm-conformance/src/lib.rs b/svm-conformance/src/lib.rs deleted file mode 100644 index 937880c131d7ce..00000000000000 --- a/svm-conformance/src/lib.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod proto { - include!(concat!(env!("OUT_DIR"), "/org.solana.sealevel.v1.rs")); -} diff --git a/svm-feature-set/Cargo.toml b/svm-feature-set/Cargo.toml index 6dbfb4b68d869b..c55d661d51a323 100644 --- a/svm-feature-set/Cargo.toml +++ b/svm-feature-set/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-svm-feature-set" -version = "3.0.0" description = "Solana SVM Feature Set" +version = { workspace = true } authors = { workspace = true } repository = { workspace = true } homepage = { workspace = true } diff --git a/svm-feature-set/src/lib.rs b/svm-feature-set/src/lib.rs index d2046761869e83..e66cd3bdc43843 100644 --- a/svm-feature-set/src/lib.rs +++ b/svm-feature-set/src/lib.rs @@ -1,8 +1,8 @@ #[derive(Clone, Copy, Default)] pub struct SVMFeatureSet { pub move_precompile_verification_to_svm: bool, - pub remove_accounts_executable_flag_checks: bool, pub stricter_abi_and_runtime_constraints: bool, + pub account_data_direct_mapping: bool, pub enable_bpf_loader_set_authority_checked_ix: bool, pub enable_loader_v4: bool, pub deplete_cu_meter_on_vm_failure: bool, @@ -31,21 +31,21 @@ pub struct 
SVMFeatureSet { pub mask_out_rent_epoch_in_vm_serialization: bool, pub simplify_alt_bn128_syscall_error_codes: bool, pub fix_alt_bn128_multiplication_input_length: bool, - pub loosen_cpi_size_restriction: bool, pub increase_tx_account_lock_limit: bool, pub enable_extend_program_checked: bool, pub formalize_loaded_transaction_data_size: bool, pub disable_zk_elgamal_proof_program: bool, pub reenable_zk_elgamal_proof_program: bool, pub raise_cpi_nesting_limit_to_8: bool, + pub provide_instruction_data_offset_in_vm_r2: bool, } impl SVMFeatureSet { pub fn all_enabled() -> Self { Self { move_precompile_verification_to_svm: true, - remove_accounts_executable_flag_checks: true, stricter_abi_and_runtime_constraints: true, + account_data_direct_mapping: true, enable_bpf_loader_set_authority_checked_ix: true, enable_loader_v4: true, deplete_cu_meter_on_vm_failure: true, @@ -74,13 +74,13 @@ impl SVMFeatureSet { mask_out_rent_epoch_in_vm_serialization: true, simplify_alt_bn128_syscall_error_codes: true, fix_alt_bn128_multiplication_input_length: true, - loosen_cpi_size_restriction: true, increase_tx_account_lock_limit: true, enable_extend_program_checked: true, formalize_loaded_transaction_data_size: true, disable_zk_elgamal_proof_program: true, reenable_zk_elgamal_proof_program: true, raise_cpi_nesting_limit_to_8: true, + provide_instruction_data_offset_in_vm_r2: true, } } } diff --git a/log-collector/Cargo.toml b/svm-log-collector/Cargo.toml similarity index 78% rename from log-collector/Cargo.toml rename to svm-log-collector/Cargo.toml index 693d5fd6b4812f..919bb386599f9b 100644 --- a/log-collector/Cargo.toml +++ b/svm-log-collector/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-log-collector" +name = "solana-svm-log-collector" description = "Solana log collector" -documentation = "https://docs.rs/solana-log-collector" +documentation = "https://docs.rs/solana-svm-log-collector" version = { workspace = true } authors = { workspace = true } repository = { workspace = 
true } diff --git a/log-collector/src/lib.rs b/svm-log-collector/src/lib.rs similarity index 100% rename from log-collector/src/lib.rs rename to svm-log-collector/src/lib.rs diff --git a/upload-perf/Cargo.toml b/svm-measure/Cargo.toml similarity index 53% rename from upload-perf/Cargo.toml rename to svm-measure/Cargo.toml index 544acc55c76f20..1d9df9bb4f67f5 100644 --- a/upload-perf/Cargo.toml +++ b/svm-measure/Cargo.toml @@ -1,7 +1,8 @@ [package] -name = "solana-upload-perf" -description = "Metrics Upload Utility" -publish = false +name = "solana-svm-measure" +description = "Timing measurement utilities for SVM" +documentation = "https://docs.rs/solana-svm-measure" +readme = "../README.md" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } @@ -11,11 +12,3 @@ edition = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "solana-upload-perf" -path = "src/upload-perf.rs" - -[dependencies] -serde_json = { workspace = true } -solana-metrics = { workspace = true } diff --git a/svm-measure/src/lib.rs b/svm-measure/src/lib.rs new file mode 100644 index 00000000000000..a1c86feb89a43f --- /dev/null +++ b/svm-measure/src/lib.rs @@ -0,0 +1,3 @@ +#![allow(clippy::arithmetic_side_effects)] +pub mod macros; +pub mod measure; diff --git a/svm-measure/src/macros.rs b/svm-measure/src/macros.rs new file mode 100644 index 00000000000000..0c8f425d17526d --- /dev/null +++ b/svm-measure/src/macros.rs @@ -0,0 +1,238 @@ +/// Measure this expression +/// +/// Use `measure_time!()` when you have an expression that you want to measure. `measure_time!()` will start +/// a new [`Measure`], evaluate your expression, stop the [`Measure`], and then return the +/// [`Measure`] object along with your expression's return value. +/// +/// Use `measure_us!()` when you want to measure an expression in microseconds. +/// +/// Use `meas_dur!()` when you want to measure an expression and get the Duration. 
+/// +/// [`Measure`]: crate::measure::Measure +/// +/// # Examples +/// +/// ``` +/// // Measure functions +/// # use solana_svm_measure::{measure_time, measure_us, meas_dur}; +/// # fn foo() {} +/// # fn bar(x: i32) {} +/// # fn add(x: i32, y: i32) -> i32 {x + y} +/// let (result, measure) = measure_time!(foo(), "foo takes no parameters"); +/// let (result, measure) = measure_time!(bar(42), "bar takes one parameter"); +/// let (result, measure) = measure_time!(add(1, 2), "add takes two parameters and returns a value"); +/// let (result, measure_us) = measure_us!(add(1, 2)); +/// let (result, duration) = meas_dur!(add(1, 2)); +/// # assert_eq!(result, 1 + 2); +/// ``` +/// +/// ``` +/// // Measure methods +/// # use solana_svm_measure::{measure_time, measure_us, meas_dur}; +/// # struct Foo { +/// # f: i32, +/// # } +/// # impl Foo { +/// # fn frobnicate(&self, bar: i32) -> i32 { +/// # self.f * bar +/// # } +/// # } +/// let foo = Foo { f: 42 }; +/// let (result, measure) = measure_time!(foo.frobnicate(2), "measure methods"); +/// let (result, measure_us) = measure_us!(foo.frobnicate(2)); +/// let (result, duration) = meas_dur!(foo.frobnicate(2)); +/// # assert_eq!(result, 42 * 2); +/// ``` +/// +/// ``` +/// // Measure expression blocks +/// # use solana_svm_measure::measure_time; +/// # fn complex_calculation() -> i32 { 42 } +/// # fn complex_transform(x: i32) -> i32 { x + 3 } +/// # fn record_result(y: i32) {} +/// let (result, measure) = measure_time!( +/// { +/// let x = complex_calculation(); +/// # assert_eq!(x, 42); +/// let y = complex_transform(x); +/// # assert_eq!(y, 42 + 3); +/// record_result(y); +/// y +/// }, +/// "measure a block of many operations", +/// ); +/// # assert_eq!(result, 42 + 3); +/// ``` +/// +/// ``` +/// // The `name` parameter is optional +/// # use solana_svm_measure::{measure_time, measure_us}; +/// # fn meow() {}; +/// let (result, measure) = measure_time!(meow()); +/// let (result, measure_us) = measure_us!(meow()); +/// ``` 
+#[macro_export] +macro_rules! measure_time { + ($val:expr, $name:tt $(,)?) => {{ + let mut measure = $crate::measure::Measure::start($name); + let result = $val; + measure.stop(); + (result, measure) + }}; + ($val:expr) => { + measure_time!($val, "") + }; +} + +#[macro_export] +macro_rules! measure_us { + ($expr:expr) => {{ + let (result, duration) = $crate::meas_dur!($expr); + (result, duration.as_micros() as u64) + }}; +} + +/// Measures how long it takes to execute an expression, and returns a Duration +/// +/// # Examples +/// +/// ``` +/// # use solana_svm_measure::meas_dur; +/// # fn meow(x: i32, y: i32) -> i32 {x + y} +/// let (result, duration) = meas_dur!(meow(1, 2) + 3); +/// # assert_eq!(result, 1 + 2 + 3); +/// ``` +// +// The macro name, `meas_dur`, is "measure" + "duration". +// When said aloud, the pronunciation is close to "measure". +#[macro_export] +macro_rules! meas_dur { + ($expr:expr) => {{ + let start = std::time::Instant::now(); + let result = $expr; + (result, start.elapsed()) + }}; +} + +#[cfg(test)] +mod tests { + use std::{thread::sleep, time::Duration}; + + fn my_multiply(x: i32, y: i32) -> i32 { + x * y + } + + fn square(x: i32) -> i32 { + my_multiply(x, x) + } + + struct SomeStruct { + x: i32, + } + impl SomeStruct { + fn add_to(&self, x: i32) -> i32 { + x + self.x + } + } + + #[test] + fn test_measure_macro() { + // Ensure that the measurement side actually works + { + let (_result, measure) = measure_time!(sleep(Duration::from_millis(1)), "test"); + assert!(measure.as_s() > 0.0); + assert!(measure.as_ms() > 0); + assert!(measure.as_us() > 0); + } + + // Ensure that the macro can be called with functions + { + let (result, _measure) = measure_time!(my_multiply(3, 4), "test"); + assert_eq!(result, 3 * 4); + + let (result, _measure) = measure_time!(square(5), "test"); + assert_eq!(result, 5 * 5) + } + + // Ensure that the macro can be called with methods + { + let some_struct = SomeStruct { x: 42 }; + let (result, _measure) = 
measure_time!(some_struct.add_to(4), "test"); + assert_eq!(result, 42 + 4); + } + + // Ensure that the macro can be called with blocks + { + let (result, _measure) = measure_time!({ 1 + 2 }, "test"); + assert_eq!(result, 3); + } + + // Ensure that the macro can be called with a trailing comma + { + let (result, _measure) = measure_time!(square(5), "test",); + assert_eq!(result, 5 * 5) + } + + // Ensure that the macro can be called without a name + { + let (result, _measure) = measure_time!(square(5)); + assert_eq!(result, 5 * 5) + } + } + + #[test] + fn test_measure_us_macro() { + // Ensure that the measurement side actually works + { + let (_result, measure) = measure_us!(sleep(Duration::from_millis(1))); + assert!(measure > 0); + } + + // Ensure that the macro can be called with functions + { + let (result, _measure) = measure_us!(my_multiply(3, 4)); + assert_eq!(result, 3 * 4); + + let (result, _measure) = measure_us!(square(5)); + assert_eq!(result, 5 * 5) + } + + // Ensure that the macro can be called with methods + { + let some_struct = SomeStruct { x: 42 }; + let (result, _measure) = measure_us!(some_struct.add_to(4)); + assert_eq!(result, 42 + 4); + } + + // Ensure that the macro can be called with blocks + { + let (result, _measure) = measure_us!({ 1 + 2 }); + assert_eq!(result, 3); + } + } + + #[test] + fn test_meas_dur_macro() { + // Ensure that the macro can be called with functions + { + let (result, _duration) = meas_dur!(my_multiply(3, 4)); + assert_eq!(result, 3 * 4); + + let (result, _duration) = meas_dur!(square(5)); + assert_eq!(result, 5 * 5) + } + + // Ensure that the macro can be called with methods + { + let some_struct = SomeStruct { x: 42 }; + let (result, _duration) = meas_dur!(some_struct.add_to(4)); + assert_eq!(result, 42 + 4); + } + + // Ensure that the macro can be called with blocks + { + let (result, _duration) = meas_dur!({ 1 + 2 }); + assert_eq!(result, 3); + } + } +} diff --git a/svm-measure/src/measure.rs 
b/svm-measure/src/measure.rs new file mode 100644 index 00000000000000..190abb30cb568e --- /dev/null +++ b/svm-measure/src/measure.rs @@ -0,0 +1,145 @@ +use std::{ + fmt, + time::{Duration, Instant}, +}; + +#[derive(Debug)] +pub struct Measure { + name: &'static str, + start: Instant, + duration: u64, +} + +impl Measure { + pub fn start(name: &'static str) -> Self { + Self { + name, + start: Instant::now(), + duration: 0, + } + } + + pub fn stop(&mut self) { + self.duration = self.start.elapsed().as_nanos() as u64; + } + + pub fn as_ns(&self) -> u64 { + self.duration + } + + pub fn as_us(&self) -> u64 { + self.duration / 1000 + } + + pub fn as_ms(&self) -> u64 { + self.duration / (1000 * 1000) + } + + pub fn as_s(&self) -> f32 { + self.duration as f32 / (1000.0f32 * 1000.0f32 * 1000.0f32) + } + + pub fn as_duration(&self) -> Duration { + Duration::from_nanos(self.as_ns()) + } + + pub fn end_as_ns(self) -> u64 { + self.start.elapsed().as_nanos() as u64 + } + + pub fn end_as_us(self) -> u64 { + self.start.elapsed().as_micros() as u64 + } + + pub fn end_as_ms(self) -> u64 { + self.start.elapsed().as_millis() as u64 + } + + pub fn end_as_s(self) -> f32 { + self.start.elapsed().as_secs_f32() + } + + pub fn end_as_duration(self) -> Duration { + self.start.elapsed() + } +} + +impl fmt::Display for Measure { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.duration == 0 { + write!(f, "{} running", self.name) + } else if self.as_us() < 1 { + write!(f, "{} took {}ns", self.name, self.duration) + } else if self.as_ms() < 1 { + write!(f, "{} took {}us", self.name, self.as_us()) + } else if self.as_s() < 1. 
{ + write!(f, "{} took {}ms", self.name, self.as_ms()) + } else { + write!(f, "{} took {:.1}s", self.name, self.as_s()) + } + } +} + +#[cfg(test)] +mod tests { + use {super::*, std::thread::sleep}; + + #[test] + fn test_measure() { + let test_duration = Duration::from_millis(100); + let mut measure = Measure::start("test"); + sleep(test_duration); + measure.stop(); + assert!(measure.as_duration() >= test_duration); + } + + #[test] + fn test_measure_as() { + let test_duration = Duration::from_millis(100); + let measure = Measure { + name: "test", + start: Instant::now(), + duration: test_duration.as_nanos() as u64, + }; + + assert!(f32::abs(measure.as_s() - 0.1f32) <= f32::EPSILON); + assert_eq!(measure.as_ms(), 100); + assert_eq!(measure.as_us(), 100_000); + assert_eq!(measure.as_ns(), 100_000_000); + assert_eq!(measure.as_duration(), test_duration); + } + + #[test] + fn test_measure_display() { + let measure = Measure { + name: "test_ns", + start: Instant::now(), + duration: 1, + }; + assert_eq!(format!("{measure}"), "test_ns took 1ns"); + + let measure = Measure { + name: "test_us", + start: Instant::now(), + duration: 1000, + }; + assert_eq!(format!("{measure}"), "test_us took 1us"); + + let measure = Measure { + name: "test_ms", + start: Instant::now(), + duration: 1000 * 1000, + }; + assert_eq!(format!("{measure}"), "test_ms took 1ms"); + + let measure = Measure { + name: "test_s", + start: Instant::now(), + duration: 1000 * 1000 * 1000, + }; + assert_eq!(format!("{measure}"), "test_s took 1.0s"); + + let measure = Measure::start("test_not_stopped"); + assert_eq!(format!("{measure}"), "test_not_stopped running"); + } +} diff --git a/timings/Cargo.toml b/svm-timings/Cargo.toml similarity index 84% rename from timings/Cargo.toml rename to svm-timings/Cargo.toml index 5d66b66cde6b6f..c0a59118777917 100644 --- a/timings/Cargo.toml +++ b/svm-timings/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-timings" +name = "solana-svm-timings" description = "Solana 
Execution Timings" -documentation = "https://docs.rs/solana-timings" +documentation = "https://docs.rs/solana-svm-timings" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } diff --git a/timings/src/lib.rs b/svm-timings/src/lib.rs similarity index 100% rename from timings/src/lib.rs rename to svm-timings/src/lib.rs diff --git a/svm-transaction/Cargo.toml b/svm-transaction/Cargo.toml index dc689d97c28877..629b788584b185 100644 --- a/svm-transaction/Cargo.toml +++ b/svm-transaction/Cargo.toml @@ -20,6 +20,6 @@ solana-transaction = { workspace = true } [dev-dependencies] solana-message = { workspace = true, features = ["bincode"] } solana-nonce = { workspace = true } -solana-system-interface = { workspace = true } +solana-system-interface = { workspace = true, features = ["bincode"] } static_assertions = { workspace = true } test-case = { workspace = true } diff --git a/type-overrides/Cargo.toml b/svm-type-overrides/Cargo.toml similarity index 93% rename from type-overrides/Cargo.toml rename to svm-type-overrides/Cargo.toml index 8acf34abdfc1a7..1159c6b6ae8d6b 100644 --- a/type-overrides/Cargo.toml +++ b/svm-type-overrides/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "solana-type-overrides" +name = "solana-svm-type-overrides" description = "Type overrides for specialized testing" version = { workspace = true } authors = { workspace = true } diff --git a/type-overrides/src/lib.rs b/svm-type-overrides/src/lib.rs similarity index 71% rename from type-overrides/src/lib.rs rename to svm-type-overrides/src/lib.rs index 324697019df426..c6573956336943 100644 --- a/type-overrides/src/lib.rs +++ b/svm-type-overrides/src/lib.rs @@ -1,11 +1,10 @@ -/// -/// This lib contains both standard imports and imports shuttle. -/// Shuttle is a Rust crate that facilitates multithreaded testing. It has its own scheduler -/// and can efficiently detect bugs in concurrent code. 
The downside is that we need to replace -/// all imports by those from Shuttle. -/// -/// Instead of importing from std, rand, and so on, import the following from solana-type-override, -/// and include the 'shuttle-test' feature in your crate to use shuttle. +//! This lib contains both standard imports and imports shuttle. +//! Shuttle is a Rust crate that facilitates multithreaded testing. It has its own scheduler +//! and can efficiently detect bugs in concurrent code. The downside is that we need to replace +//! all imports by those from Shuttle. +//! +//! Instead of importing from std, rand, and so on, import the following from solana-type-override, +//! and include the 'shuttle-test' feature in your crate to use shuttle. #[cfg(feature = "executor")] pub mod executor { diff --git a/svm/Cargo.toml b/svm/Cargo.toml index c184db31df7d23..c635ad6d2e261c 100644 --- a/svm/Cargo.toml +++ b/svm/Cargo.toml @@ -27,16 +27,15 @@ frozen-abi = [ "solana-program-runtime/frozen-abi", ] shuttle-test = [ - "solana-type-overrides/shuttle-test", - "solana-program-runtime/shuttle-test", "solana-bpf-loader-program/shuttle-test", "solana-loader-v4-program/shuttle-test", + "solana-program-runtime/shuttle-test", + "solana-svm-type-overrides/shuttle-test", ] svm-internal = [] [dependencies] ahash = { workspace = true } -itertools = { workspace = true } log = { workspace = true } percentage = { workspace = true } qualifier_attr = { workspace = true, optional = true } @@ -57,8 +56,6 @@ solana-instructions-sysvar = { workspace = true } solana-loader-v3-interface = { workspace = true, features = ["bincode"] } solana-loader-v4-interface = { workspace = true } solana-loader-v4-program = { workspace = true } -solana-log-collector = { workspace = true } -solana-measure = { workspace = true } solana-message = { workspace = true } solana-nonce = { workspace = true } solana-nonce-account = { workspace = true } @@ -67,36 +64,33 @@ solana-program-pack = { workspace = true } solana-program-runtime = 
{ workspace = true, features = ["metrics"] } solana-pubkey = { workspace = true } solana-rent = { workspace = true } -solana-rent-collector = { workspace = true } solana-sdk-ids = { workspace = true } -solana-slot-hashes = { workspace = true } solana-svm-callback = { workspace = true } solana-svm-feature-set = { workspace = true } +solana-svm-log-collector = { workspace = true } +solana-svm-measure = { workspace = true } +solana-svm-timings = { workspace = true } solana-svm-transaction = { workspace = true } +solana-svm-type-overrides = { workspace = true } solana-system-interface = { workspace = true } solana-sysvar-id = { workspace = true } -solana-timings = { workspace = true } solana-transaction-context = { workspace = true } solana-transaction-error = { workspace = true } -solana-type-overrides = { workspace = true } spl-generic-token = { workspace = true } thiserror = { workspace = true } [dev-dependencies] -agave-feature-set = { workspace = true } -agave-reserved-account-keys = { workspace = true } agave-syscalls = { workspace = true } assert_matches = { workspace = true } bincode = { workspace = true } ed25519-dalek = { workspace = true } libsecp256k1 = { workspace = true } openssl = { workspace = true } -prost = { workspace = true } rand0-7 = { workspace = true } shuttle = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-clock = { workspace = true } -solana-compute-budget-instruction = { workspace = true } +solana-compute-budget = { workspace = true } solana-compute-budget-interface = { workspace = true } solana-compute-budget-program = { workspace = true } solana-ed25519-program = { workspace = true } @@ -106,17 +100,17 @@ solana-keypair = { workspace = true } solana-logger = { workspace = true } solana-native-token = { workspace = true } solana-precompile-error = { workspace = true } +solana-program-binaries = { workspace = true } solana-program-runtime = { workspace = true, features = ["dev-context-only-utils"] } 
solana-pubkey = { workspace = true, features = ["rand"] } solana-rent = { workspace = true } -solana-sbpf = { workspace = true } -solana-secp256k1-program = { workspace = true } +solana-sbpf = { workspace = true, features = ["jit"] } +solana-secp256k1-program = { workspace = true, features = ["bincode"] } solana-secp256r1-program = { workspace = true, features = ["openssl-vendored"] } solana-signature = { workspace = true, features = ["rand"] } solana-signer = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-svm = { path = ".", features = ["dev-context-only-utils", "svm-internal"] } -solana-svm-conformance = { workspace = true } solana-system-program = { workspace = true } solana-system-transaction = { workspace = true } solana-sysvar = { workspace = true } diff --git a/svm/doc/spec.md b/svm/doc/spec.md index d256eaea1ea81c..7ca0accb5d265b 100644 --- a/svm/doc/spec.md +++ b/svm/doc/spec.md @@ -128,13 +128,9 @@ information. ```rust pub trait TransactionProcessingCallback { - fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option; - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)>; fn add_builtin_account(&self, _name: &str, _program_id: &Pubkey) {} - - fn get_current_epoch_vote_account_stake(&self, _vote_address: &Pubkey) -> u64; } ``` @@ -277,7 +273,7 @@ Steps of `load_and_execute_sanitized_transactions` - `programs_loaded_for_tx_batch` contains a reference to all the `ProgramCacheEntry`s necessary for the transaction. It maintains an `Arc` to the programs in the global `ProgramCacheEntry` data structure. - 6. Call `MessageProcessor::process_message` to execute the + 6. Call `MessageProcessor::process_message` to execute the transaction. `MessageProcessor` is contained in solana-program-runtime crate. 
The result of processing message is either `ProcessedMessageInfo` which is an i64 wrapped in a diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock deleted file mode 100644 index 61b6b3e152498b..00000000000000 --- a/svm/examples/Cargo.lock +++ /dev/null @@ -1,11024 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 4 - -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" - -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array", -] - -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if 1.0.0", - "cipher", - "cpufeatures", -] - -[[package]] -name = "aes-gcm-siv" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae0784134ba9375416d469ec31e7c5f9fa94405049cf08c5ce5b4698be673e0d" -dependencies = [ - "aead", - "aes", - "cipher", - "ctr", - "polyval", - "subtle", - "zeroize", -] - -[[package]] -name = "agave-banking-stage-ingress-types" -version = "3.0.0" -dependencies = [ - 
"crossbeam-channel", - "solana-perf", -] - -[[package]] -name = "agave-feature-set" -version = "3.0.0" -dependencies = [ - "ahash 0.8.11", - "solana-epoch-schedule", - "solana-hash", - "solana-pubkey", - "solana-sha256-hasher", - "solana-svm-feature-set", -] - -[[package]] -name = "agave-geyser-plugin-interface" -version = "3.0.0" -dependencies = [ - "log", - "solana-clock", - "solana-hash", - "solana-signature", - "solana-transaction", - "solana-transaction-status", - "thiserror 2.0.12", -] - -[[package]] -name = "agave-io-uring" -version = "3.0.0" -dependencies = [ - "io-uring", - "libc", - "log", - "slab", - "smallvec", -] - -[[package]] -name = "agave-precompiles" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "bincode", - "digest 0.10.7", - "ed25519-dalek", - "libsecp256k1", - "openssl", - "sha3", - "solana-ed25519-program", - "solana-message", - "solana-precompile-error", - "solana-pubkey", - "solana-sdk-ids", - "solana-secp256k1-program", - "solana-secp256r1-program", -] - -[[package]] -name = "agave-reserved-account-keys" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "solana-pubkey", - "solana-sdk-ids", -] - -[[package]] -name = "agave-syscalls" -version = "3.0.0" -dependencies = [ - "bincode", - "libsecp256k1", - "num-traits", - "solana-account", - "solana-account-info", - "solana-big-mod-exp", - "solana-blake3-hasher", - "solana-bn254", - "solana-clock", - "solana-cpi", - "solana-curve25519 3.0.0", - "solana-hash", - "solana-instruction", - "solana-keccak-hasher", - "solana-loader-v3-interface 5.0.0", - "solana-log-collector", - "solana-measure", - "solana-poseidon", - "solana-program-entrypoint", - "solana-program-runtime", - "solana-pubkey", - "solana-sbpf", - "solana-sdk-ids", - "solana-secp256k1-recover", - "solana-sha256-hasher", - "solana-stable-layout", - "solana-svm-callback", - "solana-svm-feature-set", - "solana-sysvar", - "solana-sysvar-id", - "solana-timings", - "solana-transaction-context", - 
"solana-type-overrides", - "thiserror 2.0.12", -] - -[[package]] -name = "agave-transaction-view" -version = "3.0.0" -dependencies = [ - "solana-hash", - "solana-message", - "solana-packet", - "solana-pubkey", - "solana-sdk-ids", - "solana-short-vec", - "solana-signature", - "solana-svm-transaction", -] - -[[package]] -name = "agave-verified-packet-receiver" -version = "3.0.0" -dependencies = [ - "solana-perf", - "solana-streamer", -] - -[[package]] -name = "agave-xdp" -version = "3.0.0" -dependencies = [ - "aya", - "caps", - "crossbeam-channel", - "libc", - "log", - "thiserror 2.0.12", -] - -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.15", - "once_cell", - "version_check", -] - -[[package]] -name = "ahash" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if 1.0.0", - "getrandom 0.2.15", - "once_cell", - "version_check", - "zerocopy 0.7.35", -] - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr", -] - -[[package]] -name = "alloc-no-stdlib" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" -dependencies = [ - "alloc-no-stdlib", -] - -[[package]] -name = "allocator-api2" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "anyhow" -version = "1.0.98" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" - -[[package]] -name = "aquamarine" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f50776554130342de4836ba542aa85a4ddb361690d7e8df13774d7284c3d5c2" -dependencies = [ - "include_dir", - "itertools 0.10.5", - "proc-macro-error2", - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "arc-swap" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" - -[[package]] -name = "ark-bn254" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a22f4561524cd949590d78d7d4c5df8f592430d221f7f3c9497bbafd8972120f" -dependencies = [ - "ark-ec", - "ark-ff", - "ark-std", -] - -[[package]] -name = "ark-ec" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd9a439d56ac24968cca0571f598a61bc8c55f71d50a89cda591cb750670ba" -dependencies = [ - "ark-ff", - "ark-poly", - 
"ark-serialize", - "ark-std", - "derivative", - "hashbrown 0.13.2", - "itertools 0.10.5", - "num-traits", - "zeroize", -] - -[[package]] -name = "ark-ff" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" -dependencies = [ - "ark-ff-asm", - "ark-ff-macros", - "ark-serialize", - "ark-std", - "derivative", - "digest 0.10.7", - "itertools 0.10.5", - "num-bigint 0.4.6", - "num-traits", - "paste", - "rustc_version", - "zeroize", -] - -[[package]] -name = "ark-ff-asm" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-ff-macros" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "ark-poly" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d320bfc44ee185d899ccbadfa8bc31aab923ce1558716e1997a1e74057fe86bf" -dependencies = [ - "ark-ff", - "ark-serialize", - "ark-std", - "derivative", - "hashbrown 0.13.2", -] - -[[package]] -name = "ark-serialize" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" -dependencies = [ - "ark-serialize-derive", - "ark-std", - "digest 0.10.7", - "num-bigint 0.4.6", -] - -[[package]] -name = "ark-serialize-derive" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] 
-name = "ark-std" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" -dependencies = [ - "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "arrayref" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" - -[[package]] -name = "arrayvec" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - -[[package]] -name = "ascii" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" - -[[package]] -name = "asn1-rs" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" -dependencies = [ - "asn1-rs-derive", - "asn1-rs-impl", - "displaydoc", - "nom", - "num-traits", - "rusticata-macros", - "thiserror 1.0.69", - "time", -] - -[[package]] -name = "asn1-rs-derive" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "synstructure 0.12.6", -] - -[[package]] -name = "asn1-rs-impl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "assert_matches" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" - -[[package]] -name = "async-channel" -version = 
"1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - -[[package]] -name = "async-compression" -version = "0.4.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" -dependencies = [ - "brotli", - "flate2", - "futures-core", - "memchr", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "async-lock" -version = "3.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" -dependencies = [ - "event-listener 5.4.0", - "event-listener-strategy", - "pin-project-lite", -] - -[[package]] -name = "async-stream" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "async-trait" -version = "0.1.88" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "autocfg" -version = "1.4.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" - -[[package]] -name = "autotools" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef941527c41b0fc0dd48511a8154cd5fc7e29200a0ff8b7203c5d777dbc795cf" -dependencies = [ - "cc", -] - -[[package]] -name = "axum" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" -dependencies = [ - "async-trait", - "axum-core", - "bitflags 1.2.1", - "bytes", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "itoa", - "matchit", - "memchr", - "mime", - "percent-encoding 2.3.1", - "pin-project-lite", - "rustversion", - "serde", - "sync_wrapper 0.1.2", - "tower 0.4.13", - "tower-layer", - "tower-service", -] - -[[package]] -name = "axum-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "mime", - "rustversion", - "tower-layer", - "tower-service", -] - -[[package]] -name = "aya" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d18bc4e506fbb85ab7392ed993a7db4d1a452c71b75a246af4a80ab8c9d2dd50" -dependencies = [ - "assert_matches", - "aya-obj", - "bitflags 2.9.1", - "bytes", - "libc", - "log", - "object", - "once_cell", - "thiserror 1.0.69", -] - -[[package]] -name = "aya-obj" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51b96c5a8ed8705b40d655273bc4212cbbf38d4e3be2788f36306f154523ec7" -dependencies = [ - "bytes", - "core-error", - "hashbrown 0.15.2", - "log", - "object", - "thiserror 1.0.69", -] - -[[package]] -name = "backoff" -version = 
"0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" -dependencies = [ - "futures-core", - "getrandom 0.2.15", - "instant", - "pin-project-lite", - "rand 0.8.5", - "tokio", -] - -[[package]] -name = "backtrace" -version = "0.3.74" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets 0.52.6", -] - -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - -[[package]] -name = "base64" -version = "0.22.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" - -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bindgen" -version = "0.69.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" -dependencies = [ - "bitflags 2.9.1", - "cexpr", - "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", - "proc-macro2", - "quote", - "regex", - 
"rustc-hash 1.1.0", - "shlex", - "syn 2.0.96", -] - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" - -[[package]] -name = "bitflags" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" -dependencies = [ - "serde", -] - -[[package]] -name = "bitmaps" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" -dependencies = [ - "typenum", -] - -[[package]] -name = "blake3" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" -dependencies = [ - "arrayref", - "arrayvec", - "cc", - "cfg-if 1.0.0", - "constant_time_eq", - "digest 0.10.7", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array", -] - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "borsh" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "115e54d64eb62cdebad391c19efc9dce4981c690c85a33a12199d99bb9546fee" -dependencies = [ - "borsh-derive 0.10.4", - "hashbrown 0.13.2", -] - -[[package]] -name = "borsh" -version = "1.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" -dependencies = [ - "borsh-derive 
1.5.7", - "cfg_aliases", -] - -[[package]] -name = "borsh-derive" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831213f80d9423998dd696e2c5345aba6be7a0bd8cd19e31c5243e13df1cef89" -dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate 0.1.5", - "proc-macro2", - "syn 1.0.109", -] - -[[package]] -name = "borsh-derive" -version = "1.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" -dependencies = [ - "once_cell", - "proc-macro-crate 3.2.0", - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "borsh-derive-internal" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65d6ba50644c98714aa2a70d13d7df3cd75cd2b523a2b452bf010443800976b3" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "borsh-schema-derive-internal" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276691d96f063427be83e6692b86148e488ebba9f48f77788724ca027ba3b6d4" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "brotli" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - -[[package]] -name = "bs58" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" 
-dependencies = [ - "tinyvec", -] - -[[package]] -name = "bstr" -version = "1.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0" -dependencies = [ - "memchr", - "serde", -] - -[[package]] -name = "bumpalo" -version = "3.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" - -[[package]] -name = "bv" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8834bb1d8ee5dc048ee3124f2c7c1afcc6bc9aed03f11e9dfd8c69470a5db340" -dependencies = [ - "feature-probe", - "serde", -] - -[[package]] -name = "bytemuck" -version = "1.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" -dependencies = [ - "bytemuck_derive", -] - -[[package]] -name = "bytemuck_derive" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" -dependencies = [ - "serde", -] - -[[package]] -name = "bzip2" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" -dependencies = [ - "bzip2-sys", - "libc", -] - -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - -[[package]] -name = "caps" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190baaad529bcfbde9e1a19022c42781bdb6ff9de25721abdb8fd98c0807730b" -dependencies = [ - "libc", - "thiserror 1.0.69", -] - -[[package]] -name = "cc" -version = "1.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" -dependencies = [ - "jobserver", - "libc", - "shlex", -] - -[[package]] -name = "cesu8" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" - -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "cfg_aliases" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" - -[[package]] -name = "cfg_eval" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45565fc9416b9896014f5732ac776f810ee53a66730c17e4020c3ec064a8f88f" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "chrono" 
-version = "0.4.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "js-sys", - "num-traits", - "serde", - "wasm-bindgen", - "windows-link", -] - -[[package]] -name = "chrono-humanize" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799627e6b4d27827a814e837b9d8a504832086081806d45b1afa34dc982b023b" -dependencies = [ - "chrono", -] - -[[package]] -name = "cipher" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", -] - -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags 1.2.1", - "strsim 0.8.0", - "textwrap", - "unicode-width 0.1.14", - "vec_map", -] - -[[package]] -name = "combine" -version = "3.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" -dependencies = [ - "ascii", - "byteorder", - "either", - "memchr", - "unreachable", -] - -[[package]] -name = "combine" -version = "4.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" -dependencies = [ - "bytes", - "memchr", -] - -[[package]] -name = "concurrent-queue" -version = "2.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "conditional-mod" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67935045d95e19071aae6ee98d649f2a5593e510802040c622200c8d6666a9ca" - -[[package]] -name = "console" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" -dependencies = [ - "encode_unicode", - "libc", - "once_cell", - "unicode-width 0.2.0", - "windows-sys 0.59.0", -] - -[[package]] -name = "console" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d" -dependencies = [ - "encode_unicode", - "libc", - "once_cell", - "unicode-width 0.2.0", - "windows-sys 0.60.2", -] - -[[package]] -name = "console_error_panic_hook" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" -dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen", -] - -[[package]] -name = "console_log" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89f72f65e8501878b8a004d5a1afb780987e2ce2b4532c562e367a72c57499f" -dependencies = [ - "log", - "web-sys", -] - -[[package]] -name = "constant_time_eq" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - -[[package]] -name = "convert_case" -version = "0.6.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "core-error" -version = "0.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efcdb2972eb64230b4c50646d8498ff73f5128d196a90c7236eec4cbe8619b8f" -dependencies = [ - "version_check", -] - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "core_affinity" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f8a03115cc34fb0d7c321dd154a3914b3ca082ccc5c11d91bf7117dbbe7171f" -dependencies = [ - "kernel32-sys", - "libc", - "num_cpus", - "winapi 0.2.8", -] - -[[package]] -name = "cpufeatures" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" -dependencies = [ - "libc", -] - -[[package]] -name = "crc32fast" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.15" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "crunchy" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "typenum", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array", - "subtle", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher", -] - -[[package]] -name = "curve25519-dalek" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "4.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "curve25519-dalek-derive", - "digest 0.10.7", - "fiat-crypto", - "rand_core 0.6.4", - "rustc_version", - "serde", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek-derive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "darling" -version = "0.20.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.20.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.11.1", - "syn 2.0.96", -] - -[[package]] -name = "darling_macro" -version = "0.20.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" -dependencies = [ - "darling_core", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "dashmap" -version = "5.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" -dependencies = [ - "cfg-if 1.0.0", - "hashbrown 0.14.5", - "lock_api", - "once_cell", - "parking_lot_core 
0.9.10", - "rayon", -] - -[[package]] -name = "data-encoding" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" - -[[package]] -name = "der-parser" -version = "8.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" -dependencies = [ - "asn1-rs", - "displaydoc", - "nom", - "num-bigint 0.4.6", - "num-traits", - "rusticata-macros", -] - -[[package]] -name = "deranged" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" -dependencies = [ - "powerfmt", -] - -[[package]] -name = "derivation-path" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e5c37193a1db1d8ed868c03ec7b152175f26160a5b740e5e484143877e0adf0" - -[[package]] -name = "derivative" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive-where" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "derive_more" -version = "0.99.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" -dependencies = [ - "convert_case 0.4.0", - "proc-macro2", - "quote", - "rustc_version", - "syn 2.0.96", -] - -[[package]] -name = "derive_more" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" -dependencies = [ - "derive_more-impl", -] - -[[package]] -name = "derive_more-impl" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" -dependencies = [ - "convert_case 0.6.0", - "proc-macro2", - "quote", - "syn 2.0.96", - "unicode-xid", -] - -[[package]] -name = "dialoguer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59c6f2989294b9a498d3ad5491a79c6deb604617378e1cdc4bfc1c1361fe2f87" -dependencies = [ - "console 0.15.11", - "shell-words", - "tempfile", - "zeroize", -] - -[[package]] -name = "difflib" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer 0.10.4", - "crypto-common", - "subtle", -] - -[[package]] -name = "dir-diff" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ad16bf5f84253b50d6557681c58c3ab67c47c77d39fed9aeb56e947290bd10" -dependencies = [ - "walkdir", -] - -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if 1.0.0", - "dirs-sys-next", -] - -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi 0.3.9", -] - -[[package]] -name = "displaydoc" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "dlopen2" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b4f5f101177ff01b8ec4ecc81eead416a8aa42819a2869311b3420fa114ffa" -dependencies = [ - "dlopen2_derive", - "libc", - "once_cell", - "winapi 0.3.9", -] - -[[package]] -name = "dlopen2_derive" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "downcast" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" - -[[package]] -name = "dyn-clone" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" - -[[package]] -name = "eager" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abe71d579d1812060163dff96056261deb5bf6729b100fa2e36a68b9649ba3d3" - -[[package]] -name = "ed25519" -version = "1.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" -dependencies = [ - "signature", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek 3.2.0", - "ed25519", - "rand 0.7.3", - "serde", - "sha2 0.9.9", - "zeroize", -] - -[[package]] -name = "ed25519-dalek-bip32" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d2be62a4061b872c8c0873ee4fc6f101ce7b889d039f019c5fa2af471a59908" -dependencies = [ - "derivation-path", - "ed25519-dalek", - "hmac 0.12.1", - "sha2 0.10.9", -] - -[[package]] -name = "educe" -version = "0.4.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" -dependencies = [ - "enum-ordinalize", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "either" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" - -[[package]] -name = "encode_unicode" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" - -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "enum-iterator" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd242f399be1da0a5354aa462d57b4ab2b4ee0683cc552f7c007d2d12d36e94" -dependencies = [ - "enum-iterator-derive", -] - -[[package]] -name = "enum-iterator-derive" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ab991c1362ac86c61ab6f556cff143daa22e5a15e4e189df818b2fd19fe65b" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "enum-ordinalize" -version = "3.1.15" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.3.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" -dependencies = [ - "libc", - "windows-sys 0.59.0", -] - -[[package]] -name = "etcd-client" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4b0ea5ef6dc2388a4b1669fa32097249bc03a15417b97cb75e38afb309e4a89" -dependencies = [ - "http 0.2.12", - "prost", - "tokio", - "tokio-stream", - "tonic", - "tonic-build", - "tower 0.4.13", - "tower-service", -] - -[[package]] -name = "event-listener" -version = "2.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" - -[[package]] -name = "event-listener" -version = "5.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" -dependencies = [ - "event-listener 5.4.0", - "pin-project-lite", -] - -[[package]] -name = "fast-math" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2465292146cdfc2011350fe3b1c616ac83cf0faeedb33463ba1c332ed8948d66" -dependencies = [ - "ieee754", -] - -[[package]] -name = "fastbloom" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27cea6e7f512d43b098939ff4d5a5d6fe3db07971e1d05176fe26c642d33f5b8" -dependencies = [ - "getrandom 0.3.1", - "rand 0.9.0", - "siphasher 1.0.1", - "wide", -] - -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - -[[package]] -name = "feature-probe" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da" - -[[package]] -name = "fiat-crypto" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" - -[[package]] -name = "filetime" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "libredox", - "windows-sys 0.59.0", -] - -[[package]] -name = "five8" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75b8549488b4715defcb0d8a8a1c1c76a80661b5fa106b4ca0e7fce59d7d875" -dependencies = [ - "five8_core", -] - -[[package]] -name = "five8_const" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26dec3da8bc3ef08f2c04f61eab298c3ab334523e55f076354d6d6f613799a7b" -dependencies = [ - 
"five8_core", -] - -[[package]] -name = "five8_core" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94474d15a76982be62ca8a39570dccce148d98c238ebb7408b0a21b2c4bdddc4" - -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - -[[package]] -name = "flate2" -version = "1.0.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - -[[package]] -name = "float-cmp" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" -dependencies = [ - "num-traits", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foldhash" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "form_urlencoded" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" -dependencies = [ - "percent-encoding 
2.3.1", -] - -[[package]] -name = "fragile" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" - -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - -[[package]] -name = "futures" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" - -[[package]] -name = "futures" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" - -[[package]] -name = "futures-executor" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-io" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" - -[[package]] -name = "futures-macro" -version = "0.3.31" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "futures-sink" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" - -[[package]] -name = "futures-task" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" - -[[package]] -name = "futures-timer" -version = "3.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" - -[[package]] -name = "futures-util" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" -dependencies = [ - "futures 0.1.31", - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "gethostname" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ebd34e35c46e00bb73e81363248d627782724609fe1b6396f553f68fe3862e" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "libc", - "wasi 
0.9.0+wasi-snapshot-preview1", - "wasm-bindgen", -] - -[[package]] -name = "getrandom" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "wasm-bindgen", -] - -[[package]] -name = "getrandom" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "libc", - "wasi 0.13.3+wasi-0.2.2", - "wasm-bindgen", - "windows-targets 0.52.6", -] - -[[package]] -name = "gimli" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" - -[[package]] -name = "glob" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" - -[[package]] -name = "globset" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19" -dependencies = [ - "aho-corasick", - "bstr", - "log", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "goauth" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" -dependencies = [ - "arc-swap", - "futures 0.3.31", - "log", - "reqwest 0.11.27", - "serde", - "serde_derive", - "serde_json", - "simpl", - "smpl_jwt", - "time", - "tokio", -] - -[[package]] -name = "governor" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b" -dependencies = [ - "cfg-if 1.0.0", - 
"dashmap", - "futures 0.3.31", - "futures-timer", - "no-std-compat", - "nonzero_ext", - "parking_lot 0.12.3", - "portable-atomic", - "quanta", - "rand 0.8.5", - "smallvec", - "spinning_top", -] - -[[package]] -name = "h2" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap 2.10.0", - "slab", - "tokio", - "tokio-util 0.7.16", - "tracing", -] - -[[package]] -name = "hash32" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606" -dependencies = [ - "byteorder", -] - -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.8", -] - -[[package]] -name = "hashbrown" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" -dependencies = [ - "ahash 0.8.11", -] - -[[package]] -name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" - -[[package]] -name = "hashbrown" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" -dependencies = [ - "allocator-api2", - "equivalent", - "foldhash", -] - -[[package]] -name = "headers" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" -dependencies = [ - "base64 0.21.7", - 
"bytes", - "headers-core", - "http 0.2.12", - "httpdate", - "mime", - "sha1", -] - -[[package]] -name = "headers-core" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" -dependencies = [ - "http 0.2.12", -] - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hermit-abi" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08" - -[[package]] -name = "hidapi" -version = "2.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b876ecf37e86b359573c16c8366bc3eba52b689884a0fc42ba3f67203d2a8b" -dependencies = [ - "cc", - "cfg-if 1.0.0", - "libc", - "pkg-config", - "windows-sys 0.48.0", -] - -[[package]] -name = "histogram" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cb882ccb290b8646e554b157ab0b71e64e8d5bef775cd66b6531e52d302669" - -[[package]] -name = "hmac" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" -dependencies = [ - "crypto-mac", - "digest 0.9.0", -] - -[[package]] -name = "hmac" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array", - "hmac 0.8.1", -] - -[[package]] -name = "home" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" -dependencies = [ - "windows-sys 0.59.0", -] - -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - -[[package]] -name = "http-body" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" -dependencies = [ - "bytes", - "http 1.2.0", -] - -[[package]] -name = "http-body-util" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" -dependencies = [ - "bytes", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" - -[[package]] -name = 
"httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "humantime" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" - -[[package]] -name = "hyper" -version = "0.14.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2 0.5.10", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", - "httparse", - "itoa", - "pin-project-lite", - "smallvec", - "tokio", - "want", -] - -[[package]] -name = "hyper-proxy" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" -dependencies = [ - "bytes", - "futures 0.3.31", - "headers", - "http 0.2.12", - "hyper 0.14.32", - "hyper-tls", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - -[[package]] -name = "hyper-rustls" -version = "0.27.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" -dependencies = [ - "futures-util", - "http 1.2.0", - "hyper 1.6.0", - "hyper-util", - "rustls 0.23.31", - "rustls-pki-types", - "tokio", - "tokio-rustls 0.26.2", - "tower-service", - 
"webpki-roots 0.26.8", -] - -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper 0.14.32", - "pin-project-lite", - "tokio", - "tokio-io-timeout", -] - -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper 0.14.32", - "native-tls", - "tokio", - "tokio-native-tls", -] - -[[package]] -name = "hyper-util" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c293b6b3d21eca78250dc7dbebd6b9210ec5530e038cbfe0661b5c47ab06e8" -dependencies = [ - "base64 0.22.1", - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", - "hyper 1.6.0", - "ipnet", - "libc", - "percent-encoding 2.3.1", - "pin-project-lite", - "socket2 0.5.10", - "tokio", - "tower-service", - "tracing", -] - -[[package]] -name = "iana-time-zone" -version = "0.1.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows-core", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - 
-[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - -[[package]] -name = "idna" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "idna" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" -dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", -] - -[[package]] -name = "idna_adapter" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" -dependencies = [ - "icu_normalizer", - "icu_properties", -] - -[[package]] -name = "ieee754" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9007da9cacbd3e6343da136e98b0d2df013f553d35bdec8b518f07bea768e19c" - -[[package]] -name = "im" -version = "15.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0acd33ff0285af998aaf9b57342af478078f53492322fafc47450e09397e0e9" -dependencies = [ - "bitmaps", - "rand_core 0.6.4", - "rand_xoshiro", - "rayon", - "serde", - "sized-chunks", - "typenum", - "version_check", -] - -[[package]] -name = "include_dir" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "923d117408f1e49d914f1a379a309cffe4f18c05cf4e3d12e613a15fc81bd0dd" -dependencies = [ - "include_dir_macros", -] - -[[package]] -name = "include_dir_macros" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cab85a7ed0bd5f0e76d93846e0147172bed2e2d3f859bcc33a8d9699cad1a75" -dependencies = [ - "proc-macro2", - "quote", -] - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - -[[package]] -name = "indexmap" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" -dependencies = [ - "equivalent", - "hashbrown 0.15.2", - "rayon", -] - -[[package]] -name = "indicatif" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a646d946d06bedbbc4cac4c218acf4bbf2d87757a784857025f4d447e4e1cd" -dependencies = [ - "console 0.16.0", - "portable-atomic", - "unicode-width 0.2.0", - "unit-prefix", - "web-time", -] - -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array", -] - -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "io-uring" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" -dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.0", - "libc", -] - -[[package]] -name = "ipnet" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" - -[[package]] -name = "iri-string" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" -dependencies = [ - "memchr", - "serde", -] - -[[package]] -name = "itertools" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" - -[[package]] -name = "jni" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" -dependencies = [ - "cesu8", - "cfg-if 1.0.0", - "combine 4.6.7", - "jni-sys", - "log", - "thiserror 1.0.69", - "walkdir", - "windows-sys 0.45.0", -] - -[[package]] -name = "jni-sys" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" - -[[package]] -name = "jobserver" -version = "0.1.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" -dependencies = [ - "libc", -] - -[[package]] -name = "js-sys" -version = "0.3.77" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" -dependencies = [ - "once_cell", - "wasm-bindgen", -] - -[[package]] -name = "json-rpc-client" -version = "3.0.0" -dependencies = [ - "borsh 1.5.7", - "clap", - "home", - "solana-client", - "solana-commitment-config", - "solana-instruction", - "solana-keypair", - "solana-message", - "solana-pubkey", - "solana-signer", - "solana-transaction", - "thiserror 1.0.69", - "yaml-rust", -] - -[[package]] -name = "json-rpc-server" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "agave-reserved-account-keys", - "agave-syscalls", - "base64 0.22.1", - "bincode", - "bs58", - "clap", - "crossbeam-channel", - "env_logger", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-http-server", - "log", - "serde", - "serde_json", - "solana-account", - "solana-account-decoder", - "solana-bpf-loader-program", - "solana-clock", - "solana-commitment-config", - "solana-compute-budget", - "solana-compute-budget-interface", - "solana-hash", - "solana-message", - "solana-nonce", - "solana-perf", - "solana-program-runtime", - "solana-pubkey", - "solana-rent", - "solana-rpc-client-api", - "solana-sdk-ids", - "solana-signature", - "solana-svm", - "solana-svm-callback", - "solana-system-interface", - "solana-system-program", - "solana-sysvar", - "solana-sysvar-id", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "solana-transaction-status", - "solana-validator-exit", - "solana-version", - "spl-token-2022-interface", - "tokio", - 
"tokio-util 0.7.16", -] - -[[package]] -name = "json5" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" -dependencies = [ - "pest", - "pest_derive", - "serde", -] - -[[package]] -name = "jsonrpc-client-transports" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" -dependencies = [ - "derive_more 0.99.18", - "futures 0.3.31", - "jsonrpc-core", - "jsonrpc-pubsub", - "log", - "serde", - "serde_json", - "url 1.7.2", -] - -[[package]] -name = "jsonrpc-core" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" -dependencies = [ - "futures 0.3.31", - "futures-executor", - "futures-util", - "log", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "jsonrpc-core-client" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" -dependencies = [ - "futures 0.3.31", - "jsonrpc-client-transports", -] - -[[package]] -name = "jsonrpc-derive" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" -dependencies = [ - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "jsonrpc-http-server" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" -dependencies = [ - "futures 0.3.31", - "hyper 0.14.32", - "jsonrpc-core", - "jsonrpc-server-utils", - "log", - "net2", - "parking_lot 0.11.2", - "unicase", -] - -[[package]] -name = "jsonrpc-pubsub" -version = "18.0.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" -dependencies = [ - "futures 0.3.31", - "jsonrpc-core", - "lazy_static", - "log", - "parking_lot 0.11.2", - "rand 0.7.3", - "serde", -] - -[[package]] -name = "jsonrpc-server-utils" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" -dependencies = [ - "bytes", - "futures 0.3.31", - "globset", - "jsonrpc-core", - "lazy_static", - "log", - "tokio", - "tokio-stream", - "tokio-util 0.6.10", - "unicase", -] - -[[package]] -name = "kaigan" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba15de5aeb137f0f65aa3bf82187647f1285abfe5b20c80c2c37f7007ad519a" -dependencies = [ - "borsh 0.10.4", - "serde", -] - -[[package]] -name = "keccak" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" -dependencies = [ - "cpufeatures", -] - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - -[[package]] -name = "lazy-lru" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35523c6dfa972e1fd19132ef647eff4360a6546c6271807e1327ca6e8797f96" -dependencies = [ - "hashbrown 0.15.2", -] - -[[package]] -name = "lazy_static" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "libc" -version = "0.2.174" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" - -[[package]] -name = "libloading" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" -dependencies = [ - "cfg-if 1.0.0", - "winapi 0.3.9", -] - -[[package]] -name = "libm" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" - -[[package]] -name = "libredox" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" -dependencies = [ - "bitflags 2.9.1", - "libc", - "redox_syscall 0.5.8", -] - -[[package]] -name = "librocksdb-sys" -version = "0.17.1+9.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b7869a512ae9982f4d46ba482c2a304f1efd80c6412a3d4bf57bb79a619679f" -dependencies = [ - "bindgen", - "bzip2-sys", - "cc", - "libc", - "libz-sys", - "lz4-sys", -] - -[[package]] -name = "libsecp256k1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" -dependencies = [ - "arrayref", - "base64 0.12.3", - "digest 0.9.0", - "hmac-drbg", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.7.3", - "serde", - "sha2 0.9.9", - "typenum", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - 
-[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccab96b584d38fac86a83f07e659f0deafd0253dc096dab5a36d53efe653c5c3" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67abfe149395e3aa1c48a2beb32b068e2334402df8181f818d3aee2b304c4f5d" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libz-sys" -version = "1.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "light-poseidon" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c9a85a9752c549ceb7578064b4ed891179d20acd85f27318573b64d2d7ee7ee" -dependencies = [ - "ark-bn254", - "ark-ff", - "num-bigint 0.4.6", - "thiserror 1.0.69", -] - -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - -[[package]] -name = "linux-raw-sys" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9c683daf087dc577b7506e9695b3d556a9f3849903fa28186283afd6809e9" - -[[package]] -name = "litemap" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" - -[[package]] -name = "lock_api" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" - -[[package]] -name = "lru" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999beba7b6e8345721bd280141ed958096a2e4abdf74f67ff4ce49b4b54e47a" -dependencies = [ - "hashbrown 0.12.3", -] - -[[package]] -name = "lru-slab" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" - -[[package]] -name = "lz4" -version = "1.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20b523e860d03443e98350ceaac5e71c6ba89aea7d960769ec3ce37f4de5af4" -dependencies = [ - "lz4-sys", -] - -[[package]] -name = "lz4-sys" -version = "1.11.1+lz4-1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - -[[package]] -name = "matchit" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" - -[[package]] -name = "memchr" -version = "2.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" - -[[package]] -name = "memmap2" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" -dependencies = [ - "libc", -] - -[[package]] -name = "memmap2" -version = "0.9.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "483758ad303d734cec05e5c12b41d7e93e6a6390c5e9dae6bdeb7c1259012d28" -dependencies = [ - "libc", -] - -[[package]] -name = "memoffset" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" -dependencies = [ - "autocfg", -] - -[[package]] -name = "merlin" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.6.4", - "zeroize", -] - -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "min-max-heap" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2687e6cf9c00f48e9284cf9fd15f2ef341d03cc7743abf9df4c5f07fdee50b18" - -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - -[[package]] -name = "miniz_oxide" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" -dependencies = [ - "adler2", -] - -[[package]] -name = "mio" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" -dependencies = [ - "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", -] - -[[package]] -name = "mockall" 
-version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" -dependencies = [ - "cfg-if 1.0.0", - "downcast", - "fragile", - "lazy_static", - "mockall_derive", - "predicates", - "predicates-tree", -] - -[[package]] -name = "mockall_derive" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" -dependencies = [ - "cfg-if 1.0.0", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "modular-bitfield" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a53d79ba8304ac1c4f9eb3b9d281f21f7be9d4626f72ce7df4ad8fbde4f38a74" -dependencies = [ - "modular-bitfield-impl", - "static_assertions", -] - -[[package]] -name = "modular-bitfield-impl" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - -[[package]] -name = "native-tls" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework 2.11.1", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "net2" -version = "0.2.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", -] - 
-[[package]] -name = "nix" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" -dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.0", - "cfg_aliases", - "libc", - "memoffset", -] - -[[package]] -name = "no-std-compat" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" - -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - -[[package]] -name = "nonzero_ext" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" - -[[package]] -name = "normalize-line-endings" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" - -[[package]] -name = "num" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8536030f9fea7127f841b45bb6243b27255787fb4eb83958aa1ef9d2fdc0c36" -dependencies = [ - "num-bigint 0.2.6", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-bigint" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" -dependencies = [ - "num-integer", - "num-traits", -] - 
-[[package]] -name = "num-complex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-conv" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" - -[[package]] -name = "num-derive" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "num-integer" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-iter" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-bigint 0.2.6", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" -dependencies = [ - "hermit-abi 0.5.1", - "libc", -] - 
-[[package]] -name = "num_enum" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a973b4e44ce6cad84ce69d797acf9a044532e4184c4f267913d1b546a0727b7a" -dependencies = [ - "num_enum_derive", - "rustversion", -] - -[[package]] -name = "num_enum_derive" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" -dependencies = [ - "proc-macro-crate 3.2.0", - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "object" -version = "0.36.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" -dependencies = [ - "crc32fast", - "hashbrown 0.15.2", - "indexmap 2.10.0", - "memchr", -] - -[[package]] -name = "oid-registry" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" -dependencies = [ - "asn1-rs", -] - -[[package]] -name = "once_cell" -version = "1.20.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" - -[[package]] -name = "opaque-debug" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" - -[[package]] -name = "openssl" -version = "0.10.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" -dependencies = [ - "bitflags 2.9.1", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - -[[package]] -name = "openssl-src" -version = "300.4.1+3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa4eac4138c62414b5622d1b31c5c304f34b406b013c079c2bbc652fdd6678c" -dependencies = [ - "cc", -] - -[[package]] -name = "openssl-sys" -version = "0.9.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" -dependencies = [ - "cc", - "libc", - "openssl-src", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "opentelemetry" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" -dependencies = [ - "async-trait", - "crossbeam-channel", - "futures-channel", - "futures-executor", - "futures-util", - "js-sys", - "lazy_static", - "percent-encoding 2.3.1", - "pin-project", - "rand 0.8.5", - "thiserror 1.0.69", -] - -[[package]] -name = "parking" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" - -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - -[[package]] -name = "parking_lot" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" -dependencies = [ - 
"lock_api", - "parking_lot_core 0.9.10", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi 0.3.9", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall 0.5.8", - "smallvec", - "windows-targets 0.52.6", -] - -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - -[[package]] -name = "pbkdf2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" -dependencies = [ - "crypto-mac", -] - -[[package]] -name = "pbkdf2" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" -dependencies = [ - "digest 0.10.7", -] - -[[package]] -name = "pem" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" -dependencies = [ - "base64 0.13.1", -] - -[[package]] -name = "percent-encoding" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" - -[[package]] -name = "percent-encoding" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" - -[[package]] -name = "percentage" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd23b938276f14057220b707937bcb42fa76dda7560e57a2da30cb52d557937" -dependencies = [ - "num", -] - -[[package]] -name = "pest" -version = "2.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" -dependencies = [ - "memchr", - "thiserror 2.0.12", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816518421cfc6887a0d62bf441b6ffb4536fcc926395a69e1a85852d4363f57e" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d1396fd3a870fc7838768d171b4616d5c91f6cc25e377b673d714567d99377b" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "pest_meta" -version = "2.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea" -dependencies = [ - "once_cell", - "pest", - "sha2 0.10.9", -] - -[[package]] -name = "petgraph" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" -dependencies = [ - "fixedbitset", - "indexmap 2.10.0", -] - -[[package]] -name = "pin-project" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" - -[[package]] -name = "polyval" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "portable-atomic" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" - -[[package]] -name = "powerfmt" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" - -[[package]] -name = "ppv-lite86" -version = "0.2.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" -dependencies = [ - "zerocopy 0.7.35", -] - -[[package]] -name = "predicates" -version = "2.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" -dependencies = [ - "difflib", - "float-cmp", - 
"itertools 0.10.5", - "normalize-line-endings", - "predicates-core", - "regex", -] - -[[package]] -name = "predicates-core" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" - -[[package]] -name = "predicates-tree" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" -dependencies = [ - "predicates-core", - "termtree", -] - -[[package]] -name = "pretty-hex" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa0831dd7cc608c38a5e323422a0077678fa5744aa2be4ad91c4ece8eec8d5" - -[[package]] -name = "prettyplease" -version = "0.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" -dependencies = [ - "proc-macro2", - "syn 1.0.109", -] - -[[package]] -name = "prio-graph" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f28921629370a46cf564f6ba1828bd8d1c97f7fad4ee9d1c6438f92feed6b8d" -dependencies = [ - "ahash 0.8.11", -] - -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml", -] - -[[package]] -name = "proc-macro-crate" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" -dependencies = [ - "toml_edit", -] - -[[package]] -name = "proc-macro-error-attr2" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" -dependencies = [ - "proc-macro2", - "quote", -] - 
-[[package]] -name = "proc-macro-error2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" -dependencies = [ - "proc-macro-error-attr2", - "proc-macro2", - "quote", -] - -[[package]] -name = "proc-macro2" -version = "1.0.95" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "prost" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" -dependencies = [ - "bytes", - "heck", - "itertools 0.10.5", - "lazy_static", - "log", - "multimap", - "petgraph", - "prettyplease", - "prost", - "prost-types", - "regex", - "syn 1.0.109", - "tempfile", - "which", -] - -[[package]] -name = "prost-derive" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" -dependencies = [ - "anyhow", - "itertools 0.10.5", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "prost-types" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" -dependencies = [ - "prost", -] - -[[package]] -name = "protobuf-src" -version = "1.1.0+21.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7ac8852baeb3cc6fb83b93646fb93c0ffe5d14bf138c945ceb4b9948ee0e3c1" -dependencies = [ - "autotools", -] - -[[package]] -name = 
"qstring" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e" -dependencies = [ - "percent-encoding 2.3.1", -] - -[[package]] -name = "qualifier_attr" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "quanta" -version = "0.12.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bd1fe6824cea6538803de3ff1bc0cf3949024db3d43c9643024bfb33a807c0e" -dependencies = [ - "crossbeam-utils", - "libc", - "once_cell", - "raw-cpuid", - "wasi 0.11.0+wasi-snapshot-preview1", - "web-sys", - "winapi 0.3.9", -] - -[[package]] -name = "quinn" -version = "0.11.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" -dependencies = [ - "bytes", - "cfg_aliases", - "pin-project-lite", - "quinn-proto", - "quinn-udp", - "rustc-hash 2.1.0", - "rustls 0.23.31", - "socket2 0.5.10", - "thiserror 2.0.12", - "tokio", - "tracing", - "web-time", -] - -[[package]] -name = "quinn-proto" -version = "0.11.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" -dependencies = [ - "bytes", - "fastbloom", - "getrandom 0.3.1", - "lru-slab", - "rand 0.9.0", - "ring", - "rustc-hash 2.1.0", - "rustls 0.23.31", - "rustls-pki-types", - "rustls-platform-verifier", - "slab", - "thiserror 2.0.12", - "tinyvec", - "tracing", - "web-time", -] - -[[package]] -name = "quinn-udp" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" -dependencies = [ - "cfg_aliases", - "libc", - "once_cell", 
- "socket2 0.5.10", - "tracing", - "windows-sys 0.59.0", -] - -[[package]] -name = "quote" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" -dependencies = [ - "rand_chacha 0.9.0", - "rand_core 0.9.3", - "zerocopy 0.8.23", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" -dependencies = [ - "ppv-lite86", - "rand_core 0.9.3", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.15", -] - -[[package]] -name = "rand_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" -dependencies = [ - "getrandom 0.3.1", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_xoshiro" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" -dependencies = [ - "rand_core 0.6.4", -] - -[[package]] -name = "raw-cpuid" -version = "11.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6928fa44c097620b706542d428957635951bade7143269085389d42c8a4927e" -dependencies = [ - "bitflags 2.9.1", -] - -[[package]] -name = "rayon" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.2.1", -] - -[[package]] -name = "redox_syscall" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" -dependencies = [ - "bitflags 2.9.1", -] - -[[package]] -name = "redox_users" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" -dependencies = [ - "getrandom 0.2.15", - "libredox", - "thiserror 1.0.69", -] - -[[package]] -name = "reed-solomon-erasure" -version = "6.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7263373d500d4d4f505d43a2a662d475a894aa94503a1ee28e9188b5f3960d4f" -dependencies = [ - "cc", - "libc", - "libm", - "lru", - "parking_lot 0.11.2", - "smallvec", - "spin", -] - -[[package]] -name = "regex" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" - -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - 
"base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding 2.3.1", - "pin-project-lite", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", - "tokio", - "tokio-native-tls", - "tower-service", - "url 2.5.4", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - -[[package]] -name = "reqwest" -version = "0.12.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" -dependencies = [ - "async-compression", - "base64 0.22.1", - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.6.0", - "hyper-rustls", - "hyper-util", - "js-sys", - "log", - "percent-encoding 2.3.1", - "pin-project-lite", - "quinn", - "rustls 0.23.31", - "rustls-pki-types", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 1.0.2", - "tokio", - "tokio-rustls 0.26.2", - "tokio-util 0.7.16", - "tower 0.5.2", - "tower-http", - "tower-service", - "url 2.5.4", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots 1.0.0", -] - -[[package]] -name = "reqwest-middleware" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57f17d28a6e6acfe1733fe24bcd30774d13bffa4b8a22535b4c8c98423088d4e" -dependencies = [ - "anyhow", - "async-trait", - "http 1.2.0", - "reqwest 0.12.22", - "serde", - "thiserror 1.0.69", - "tower-service", -] - -[[package]] -name = "ring" -version = "0.17.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" -dependencies = [ - "cc", - "cfg-if 1.0.0", - "getrandom 0.2.15", - 
"libc", - "untrusted", - "windows-sys 0.52.0", -] - -[[package]] -name = "rocksdb" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26ec73b20525cb235bad420f911473b69f9fe27cc856c5461bccd7e4af037f43" -dependencies = [ - "libc", - "librocksdb-sys", -] - -[[package]] -name = "rolling-file" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8395b4f860856b740f20a296ea2cd4d823e81a2658cf05ef61be22916026a906" -dependencies = [ - "chrono", -] - -[[package]] -name = "rpassword" -version = "7.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d4c8b64f049c6721ec8ccec37ddfc3d641c4a7fca57e8f2a89de509c73df39" -dependencies = [ - "libc", - "rtoolbox", - "windows-sys 0.59.0", -] - -[[package]] -name = "rtoolbox" -version = "0.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e" -dependencies = [ - "libc", - "windows-sys 0.48.0", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hash" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" - -[[package]] -name = "rustc_version" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" -dependencies = [ - "semver", -] - -[[package]] -name = "rusticata-macros" -version = "4.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" -dependencies = [ - "nom", -] - -[[package]] -name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.9.1", - "errno", - "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustix" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" -dependencies = [ - "bitflags 2.9.1", - "errno", - "libc", - "linux-raw-sys 0.9.2", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - -[[package]] -name = "rustls" -version = "0.23.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" -dependencies = [ - "once_cell", - "ring", - "rustls-pki-types", - "rustls-webpki 0.103.4", - "subtle", - "zeroize", -] - -[[package]] -name = "rustls-native-certs" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" -dependencies = [ - "openssl-probe", - "rustls-pki-types", - "schannel", - "security-framework 3.2.0", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - -[[package]] -name = "rustls-pki-types" -version = "1.12.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" -dependencies = [ - "web-time", - "zeroize", -] - -[[package]] -name = "rustls-platform-verifier" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5467026f437b4cb2a533865eaa73eb840019a0916f4b9ec563c6e617e086c9" -dependencies = [ - "core-foundation 0.10.0", - "core-foundation-sys", - "jni", - "log", - "once_cell", - "rustls 0.23.31", - "rustls-native-certs", - "rustls-platform-verifier-android", - "rustls-webpki 0.103.4", - "security-framework 3.2.0", - "security-framework-sys", - "webpki-root-certs", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustls-platform-verifier-android" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" - -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "rustls-webpki" -version = "0.103.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" -dependencies = [ - "ring", - "rustls-pki-types", - "untrusted", -] - -[[package]] -name = "rustversion" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" - -[[package]] -name = "ryu" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" - -[[package]] -name = "safe_arch" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" -dependencies = [ - "bytemuck", -] - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "schannel" -version = "0.1.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" -dependencies = [ - "windows-sys 0.59.0", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "security-framework" -version = "2.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags 2.9.1", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" -dependencies = [ - "bitflags 2.9.1", - "core-foundation 0.10.0", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] 
-name = "semver" -version = "1.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" - -[[package]] -name = "seqlock" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5c67b6f14ecc5b86c66fa63d76b5092352678545a8a3cdae80aef5128371910" -dependencies = [ - "parking_lot 0.12.3", -] - -[[package]] -name = "serde" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde-big-array" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_bytes" -version = "0.11.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "serde_json" -version = "1.0.142" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" -dependencies = [ - "itoa", - "memchr", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_with" -version = "3.14.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" -dependencies = [ - "serde", - "serde_derive", - "serde_with_macros", -] - -[[package]] -name = "serde_with_macros" -version = "3.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "serde_yaml" -version = "0.9.34+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" -dependencies = [ - "indexmap 2.10.0", - "itoa", - "ryu", - "serde", - "unsafe-libyaml", -] - -[[package]] -name = "sha-1" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha1" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "sha2" -version = "0.10.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.7", -] - -[[package]] -name = "sha3" 
-version = "0.10.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" -dependencies = [ - "digest 0.10.7", - "keccak", -] - -[[package]] -name = "sharded-slab" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shell-words" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "signal-hook" -version = "0.3.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" -dependencies = [ - "libc", - "signal-hook-registry", -] - -[[package]] -name = "signal-hook-registry" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" -dependencies = [ - "libc", -] - -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" - -[[package]] -name = "simpl" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a30f10c911c0355f80f1c2faa8096efc4a58cdf8590b954d5b395efa071c711" - -[[package]] -name = "siphasher" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" - -[[package]] -name = "siphasher" -version = 
"1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" - -[[package]] -name = "sized-chunks" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e" -dependencies = [ - "bitmaps", - "typenum", -] - -[[package]] -name = "slab" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" - -[[package]] -name = "smallvec" -version = "1.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" - -[[package]] -name = "smpl_jwt" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b6ff8c21c74ce7744643a7cddbb02579a44f1f77e4316bff1ddb741aca8ac9" -dependencies = [ - "base64 0.13.1", - "log", - "openssl", - "serde", - "serde_derive", - "serde_json", - "simpl", - "time", -] - -[[package]] -name = "socket2" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "socket2" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" -dependencies = [ - "libc", - "windows-sys 0.59.0", -] - -[[package]] -name = "soketto" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" -dependencies = [ - "base64 0.13.1", - "bytes", - "futures 0.3.31", - "httparse", - "log", - "rand 0.8.5", - "sha-1", -] - -[[package]] -name = "solana-account" -version = 
"2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f949fe4edaeaea78c844023bfc1c898e0b1f5a100f8a8d2d0f85d0a7b090258" -dependencies = [ - "bincode", - "serde", - "serde_bytes", - "serde_derive", - "solana-account-info", - "solana-clock", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-sysvar", -] - -[[package]] -name = "solana-account-decoder" -version = "3.0.0" -dependencies = [ - "Inflector", - "base64 0.22.1", - "bincode", - "bs58", - "bv", - "serde", - "serde_derive", - "serde_json", - "solana-account", - "solana-account-decoder-client-types", - "solana-address-lookup-table-interface", - "solana-clock", - "solana-config-program-client", - "solana-epoch-schedule", - "solana-fee-calculator", - "solana-instruction", - "solana-loader-v3-interface 5.0.0", - "solana-nonce", - "solana-program-option", - "solana-program-pack", - "solana-pubkey", - "solana-rent", - "solana-sdk-ids", - "solana-slot-hashes", - "solana-slot-history", - "solana-stake-interface", - "solana-sysvar", - "solana-vote-interface", - "spl-generic-token", - "spl-token-2022-interface", - "spl-token-group-interface", - "spl-token-interface", - "spl-token-metadata-interface", - "thiserror 2.0.12", - "zstd", -] - -[[package]] -name = "solana-account-decoder-client-types" -version = "3.0.0" -dependencies = [ - "base64 0.22.1", - "bs58", - "serde", - "serde_derive", - "serde_json", - "solana-account", - "solana-pubkey", - "zstd", -] - -[[package]] -name = "solana-account-info" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8f5152a288ef1912300fc6efa6c2d1f9bb55d9398eb6c72326360b8063987da" -dependencies = [ - "bincode", - "serde", - "solana-program-error", - "solana-program-memory", - "solana-pubkey", -] - -[[package]] -name = "solana-accounts-db" -version = "3.0.0" -dependencies = [ - "agave-io-uring", - "ahash 0.8.11", - "bincode", - "blake3", - "bv", - "bytemuck", - "bytemuck_derive", - "bzip2", 
- "crossbeam-channel", - "dashmap", - "indexmap 2.10.0", - "io-uring", - "itertools 0.12.1", - "libc", - "log", - "lz4", - "memmap2 0.9.7", - "modular-bitfield", - "num_cpus", - "num_enum", - "rand 0.8.5", - "rayon", - "seqlock", - "serde", - "serde_derive", - "slab", - "smallvec", - "solana-account", - "solana-address-lookup-table-interface", - "solana-bucket-map", - "solana-clock", - "solana-epoch-schedule", - "solana-fee-calculator", - "solana-genesis-config", - "solana-hash", - "solana-lattice-hash", - "solana-measure", - "solana-message", - "solana-metrics", - "solana-nohash-hasher", - "solana-pubkey", - "solana-rayon-threadlimit", - "solana-rent-collector", - "solana-reward-info", - "solana-sha256-hasher", - "solana-slot-hashes", - "solana-svm-transaction", - "solana-system-interface", - "solana-sysvar", - "solana-time-utils", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "spl-generic-token", - "static_assertions", - "tar", - "tempfile", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-address-lookup-table-interface" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1673f67efe870b64a65cb39e6194be5b26527691ce5922909939961a6e6b395" -dependencies = [ - "bincode", - "bytemuck", - "serde", - "serde_derive", - "solana-clock", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-slot-hashes", -] - -[[package]] -name = "solana-atomic-u64" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52e52720efe60465b052b9e7445a01c17550666beec855cce66f44766697bc2" -dependencies = [ - "parking_lot 0.12.3", -] - -[[package]] -name = "solana-banks-client" -version = "3.0.0" -dependencies = [ - "borsh 1.5.7", - "futures 0.3.31", - "solana-account", - "solana-banks-interface", - "solana-clock", - "solana-commitment-config", - "solana-hash", - "solana-message", - "solana-program-pack", - "solana-pubkey", - "solana-rent", 
- "solana-signature", - "solana-sysvar", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "tarpc", - "thiserror 2.0.12", - "tokio", - "tokio-serde", -] - -[[package]] -name = "solana-banks-interface" -version = "3.0.0" -dependencies = [ - "serde", - "serde_derive", - "solana-account", - "solana-clock", - "solana-commitment-config", - "solana-hash", - "solana-message", - "solana-pubkey", - "solana-signature", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "tarpc", -] - -[[package]] -name = "solana-banks-server" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "bincode", - "crossbeam-channel", - "futures 0.3.31", - "solana-account", - "solana-banks-interface", - "solana-client", - "solana-clock", - "solana-commitment-config", - "solana-hash", - "solana-message", - "solana-pubkey", - "solana-runtime", - "solana-runtime-transaction", - "solana-send-transaction-service", - "solana-signature", - "solana-svm", - "solana-transaction", - "solana-transaction-error", - "tarpc", - "tokio", - "tokio-serde", -] - -[[package]] -name = "solana-big-mod-exp" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75db7f2bbac3e62cfd139065d15bcda9e2428883ba61fc8d27ccb251081e7567" -dependencies = [ - "num-bigint 0.4.6", - "num-traits", - "solana-define-syscall", -] - -[[package]] -name = "solana-bincode" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19a3787b8cf9c9fe3dd360800e8b70982b9e5a8af9e11c354b6665dd4a003adc" -dependencies = [ - "bincode", - "serde", - "solana-instruction", -] - -[[package]] -name = "solana-blake3-hasher" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0801e25a1b31a14494fc80882a036be0ffd290efc4c2d640bfcca120a4672" -dependencies = [ - "blake3", - "solana-define-syscall", - "solana-hash", - "solana-sanitize", -] - -[[package]] 
-name = "solana-bloom" -version = "3.0.0" -dependencies = [ - "bv", - "fnv", - "rand 0.8.5", - "serde", - "serde_derive", - "solana-sanitize", - "solana-time-utils", -] - -[[package]] -name = "solana-bn254" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4420f125118732833f36facf96a27e7b78314b2d642ba07fa9ffdacd8d79e243" -dependencies = [ - "ark-bn254", - "ark-ec", - "ark-ff", - "ark-serialize", - "bytemuck", - "solana-define-syscall", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-borsh" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718333bcd0a1a7aed6655aa66bef8d7fb047944922b2d3a18f49cbc13e73d004" -dependencies = [ - "borsh 0.10.4", - "borsh 1.5.7", -] - -[[package]] -name = "solana-bpf-loader-program" -version = "3.0.0" -dependencies = [ - "agave-syscalls", - "bincode", - "qualifier_attr", - "solana-account", - "solana-bincode", - "solana-clock", - "solana-instruction", - "solana-loader-v3-interface 5.0.0", - "solana-loader-v4-interface", - "solana-log-collector", - "solana-measure", - "solana-packet", - "solana-program-entrypoint", - "solana-program-runtime", - "solana-pubkey", - "solana-sbpf", - "solana-sdk-ids", - "solana-svm-feature-set", - "solana-system-interface", - "solana-transaction-context", - "solana-type-overrides", -] - -[[package]] -name = "solana-bucket-map" -version = "3.0.0" -dependencies = [ - "bv", - "bytemuck", - "bytemuck_derive", - "memmap2 0.9.7", - "modular-bitfield", - "num_enum", - "rand 0.8.5", - "solana-clock", - "solana-measure", - "solana-pubkey", - "tempfile", -] - -[[package]] -name = "solana-builtins" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "solana-bpf-loader-program", - "solana-compute-budget-program", - "solana-hash", - "solana-loader-v4-program", - "solana-program-runtime", - "solana-pubkey", - "solana-sdk-ids", - "solana-stake-program", - "solana-system-program", - "solana-vote-program", - 
"solana-zk-elgamal-proof-program", - "solana-zk-token-proof-program", -] - -[[package]] -name = "solana-builtins-default-costs" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "ahash 0.8.11", - "log", - "solana-bpf-loader-program", - "solana-compute-budget-program", - "solana-loader-v4-program", - "solana-pubkey", - "solana-sdk-ids", - "solana-stake-program", - "solana-system-program", - "solana-vote-program", -] - -[[package]] -name = "solana-clap-utils" -version = "3.0.0" -dependencies = [ - "chrono", - "clap", - "rpassword", - "solana-clock", - "solana-cluster-type", - "solana-commitment-config", - "solana-derivation-path", - "solana-hash", - "solana-keypair", - "solana-message", - "solana-native-token", - "solana-presigner", - "solana-pubkey", - "solana-remote-wallet", - "solana-seed-phrase", - "solana-signature", - "solana-signer", - "thiserror 2.0.12", - "tiny-bip39", - "uriparse", - "url 2.5.4", -] - -[[package]] -name = "solana-cli-config" -version = "3.0.0" -dependencies = [ - "dirs-next", - "serde", - "serde_derive", - "serde_yaml", - "solana-clap-utils", - "solana-commitment-config", - "url 2.5.4", -] - -[[package]] -name = "solana-cli-output" -version = "3.0.0" -dependencies = [ - "Inflector", - "agave-reserved-account-keys", - "base64 0.22.1", - "chrono", - "clap", - "console 0.16.0", - "humantime", - "indicatif", - "pretty-hex", - "semver", - "serde", - "serde_json", - "solana-account", - "solana-account-decoder", - "solana-bincode", - "solana-clap-utils", - "solana-cli-config", - "solana-clock", - "solana-epoch-info", - "solana-hash", - "solana-message", - "solana-native-token", - "solana-packet", - "solana-pubkey", - "solana-rpc-client-api", - "solana-sdk-ids", - "solana-signature", - "solana-stake-interface", - "solana-system-interface", - "solana-sysvar", - "solana-transaction", - "solana-transaction-error", - "solana-transaction-status", - "solana-transaction-status-client-types", - "solana-vote-program", - "spl-memo-interface", -] 
- -[[package]] -name = "solana-client" -version = "3.0.0" -dependencies = [ - "async-trait", - "bincode", - "dashmap", - "futures 0.3.31", - "futures-util", - "indexmap 2.10.0", - "indicatif", - "log", - "quinn", - "rayon", - "solana-account", - "solana-client-traits", - "solana-commitment-config", - "solana-connection-cache", - "solana-epoch-info", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-measure", - "solana-message", - "solana-pubkey", - "solana-pubsub-client", - "solana-quic-client", - "solana-quic-definitions", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-rpc-client-nonce-utils", - "solana-signature", - "solana-signer", - "solana-streamer", - "solana-time-utils", - "solana-tpu-client", - "solana-transaction", - "solana-transaction-error", - "solana-transaction-status-client-types", - "solana-udp-client", - "thiserror 2.0.12", - "tokio", -] - -[[package]] -name = "solana-client-traits" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83f0071874e629f29e0eb3dab8a863e98502ac7aba55b7e0df1803fc5cac72a7" -dependencies = [ - "solana-account", - "solana-commitment-config", - "solana-epoch-info", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-message", - "solana-pubkey", - "solana-signature", - "solana-signer", - "solana-system-interface", - "solana-transaction", - "solana-transaction-error", -] - -[[package]] -name = "solana-clock" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bb482ab70fced82ad3d7d3d87be33d466a3498eb8aa856434ff3c0dfc2e2e31" -dependencies = [ - "serde", - "serde_derive", - "solana-sdk-ids", - "solana-sdk-macro", - "solana-sysvar-id", -] - -[[package]] -name = "solana-cluster-type" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ace9fea2daa28354d107ea879cff107181d85cd4e0f78a2bedb10e1a428c97e" -dependencies = [ - "serde", - 
"serde_derive", - "solana-hash", -] - -[[package]] -name = "solana-commitment-config" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac49c4dde3edfa832de1697e9bcdb7c3b3f7cb7a1981b7c62526c8bb6700fb73" -dependencies = [ - "serde", - "serde_derive", -] - -[[package]] -name = "solana-compute-budget" -version = "3.0.0" -dependencies = [ - "solana-fee-structure", - "solana-program-runtime", -] - -[[package]] -name = "solana-compute-budget-instruction" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "log", - "solana-borsh", - "solana-builtins-default-costs", - "solana-compute-budget", - "solana-compute-budget-interface", - "solana-instruction", - "solana-packet", - "solana-pubkey", - "solana-sdk-ids", - "solana-svm-transaction", - "solana-transaction-error", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-compute-budget-interface" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8432d2c4c22d0499aa06d62e4f7e333f81777b3d7c96050ae9e5cb71a8c3aee4" -dependencies = [ - "borsh 1.5.7", - "solana-instruction", - "solana-sdk-ids", -] - -[[package]] -name = "solana-compute-budget-program" -version = "3.0.0" -dependencies = [ - "solana-program-runtime", -] - -[[package]] -name = "solana-config-interface" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fbdbcfedb467322ac9686ca61da0a1fdede2fd99a01fb2ed52b49452abd22e0" -dependencies = [ - "bincode", - "serde", - "serde_derive", - "solana-account", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-short-vec", - "solana-system-interface", -] - -[[package]] -name = "solana-config-program-client" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef9867b9ffae6e48a97ce6349e7796fcb34084298e909a8fa1fe427f41b52fd4" -dependencies = [ - "bincode", - "borsh 0.10.4", - "kaigan", - "serde", - 
"solana-config-interface", - "solana-program", -] - -[[package]] -name = "solana-connection-cache" -version = "3.0.0" -dependencies = [ - "async-trait", - "bincode", - "crossbeam-channel", - "futures-util", - "indexmap 2.10.0", - "log", - "rand 0.8.5", - "rayon", - "solana-keypair", - "solana-measure", - "solana-metrics", - "solana-time-utils", - "solana-transaction-error", - "thiserror 2.0.12", - "tokio", -] - -[[package]] -name = "solana-core" -version = "3.0.0" -dependencies = [ - "agave-banking-stage-ingress-types", - "agave-feature-set", - "agave-transaction-view", - "agave-verified-packet-receiver", - "ahash 0.8.11", - "anyhow", - "arrayvec", - "assert_matches", - "async-trait", - "base64 0.22.1", - "bincode", - "bs58", - "bytes", - "chrono", - "conditional-mod", - "crossbeam-channel", - "dashmap", - "derive_more 1.0.0", - "etcd-client", - "futures 0.3.31", - "histogram", - "itertools 0.12.1", - "log", - "lru", - "min-max-heap", - "num_cpus", - "num_enum", - "prio-graph", - "qualifier_attr", - "quinn", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rayon", - "rolling-file", - "rustls 0.23.31", - "serde", - "serde_bytes", - "serde_derive", - "slab", - "solana-account", - "solana-accounts-db", - "solana-address-lookup-table-interface", - "solana-bincode", - "solana-bloom", - "solana-builtins-default-costs", - "solana-client", - "solana-clock", - "solana-compute-budget", - "solana-compute-budget-instruction", - "solana-compute-budget-interface", - "solana-connection-cache", - "solana-cost-model", - "solana-entry", - "solana-epoch-schedule", - "solana-fee", - "solana-fee-calculator", - "solana-fee-structure", - "solana-genesis-config", - "solana-geyser-plugin-manager", - "solana-gossip", - "solana-hard-forks", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-ledger", - "solana-loader-v3-interface 5.0.0", - "solana-measure", - "solana-message", - "solana-metrics", - "solana-native-token", - "solana-net-utils", - "solana-nonce", - 
"solana-nonce-account", - "solana-packet", - "solana-perf", - "solana-poh", - "solana-poh-config", - "solana-pubkey", - "solana-quic-client", - "solana-quic-definitions", - "solana-rayon-threadlimit", - "solana-rent", - "solana-rpc", - "solana-rpc-client-api", - "solana-runtime", - "solana-runtime-transaction", - "solana-sanitize", - "solana-sdk-ids", - "solana-send-transaction-service", - "solana-sha256-hasher", - "solana-short-vec", - "solana-shred-version", - "solana-signature", - "solana-signer", - "solana-slot-hashes", - "solana-slot-history", - "solana-streamer", - "solana-svm", - "solana-svm-transaction", - "solana-system-interface", - "solana-system-transaction", - "solana-sysvar", - "solana-time-utils", - "solana-timings", - "solana-tls-utils", - "solana-tpu-client", - "solana-tpu-client-next", - "solana-transaction", - "solana-transaction-error", - "solana-transaction-status", - "solana-turbine", - "solana-unified-scheduler-pool", - "solana-validator-exit", - "solana-version", - "solana-vote", - "solana-vote-program", - "solana-wen-restart", - "static_assertions", - "strum", - "strum_macros", - "sys-info", - "sysctl", - "tempfile", - "thiserror 2.0.12", - "tikv-jemallocator", - "tokio", - "tokio-util 0.7.16", - "trees", -] - -[[package]] -name = "solana-cost-model" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "ahash 0.8.11", - "log", - "solana-bincode", - "solana-borsh", - "solana-builtins-default-costs", - "solana-clock", - "solana-compute-budget", - "solana-compute-budget-instruction", - "solana-compute-budget-interface", - "solana-fee-structure", - "solana-metrics", - "solana-packet", - "solana-pubkey", - "solana-runtime-transaction", - "solana-sdk-ids", - "solana-svm-transaction", - "solana-system-interface", - "solana-transaction-error", - "solana-vote-program", -] - -[[package]] -name = "solana-cpi" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8dc71126edddc2ba014622fc32d0f5e2e78ec6c5a1e0eb511b85618c09e9ea11" -dependencies = [ - "solana-account-info", - "solana-define-syscall", - "solana-instruction", - "solana-program-error", - "solana-pubkey", - "solana-stable-layout", -] - -[[package]] -name = "solana-curve25519" -version = "2.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def3cfe5279edb64fc39111cff6dcf77b01fbfba2c02c13ced41e6a48baf4cbe" -dependencies = [ - "bytemuck", - "bytemuck_derive", - "curve25519-dalek 4.1.3", - "solana-define-syscall", - "subtle", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-curve25519" -version = "3.0.0" -dependencies = [ - "bytemuck", - "bytemuck_derive", - "curve25519-dalek 4.1.3", - "solana-define-syscall", - "subtle", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-decode-error" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a6a6383af236708048f8bd8d03db8ca4ff7baf4a48e5d580f4cce545925470" -dependencies = [ - "num-traits", -] - -[[package]] -name = "solana-define-syscall" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ae3e2abcf541c8122eafe9a625d4d194b4023c20adde1e251f94e056bb1aee2" - -[[package]] -name = "solana-derivation-path" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "939756d798b25c5ec3cca10e06212bdca3b1443cb9bb740a38124f58b258737b" -dependencies = [ - "derivation-path", - "qstring", - "uriparse", -] - -[[package]] -name = "solana-ed25519-program" -version = "2.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feafa1691ea3ae588f99056f4bdd1293212c7ece28243d7da257c443e84753" -dependencies = [ - "bytemuck", - "bytemuck_derive", - "ed25519-dalek", - "solana-feature-set", - "solana-instruction", - "solana-precompile-error", - "solana-sdk-ids", -] - -[[package]] -name = "solana-entry" -version = "3.0.0" -dependencies = [ - 
"bincode", - "crossbeam-channel", - "dlopen2", - "log", - "num_cpus", - "rand 0.8.5", - "rayon", - "serde", - "solana-hash", - "solana-measure", - "solana-merkle-tree", - "solana-metrics", - "solana-packet", - "solana-perf", - "solana-runtime-transaction", - "solana-sha256-hasher", - "solana-transaction", - "solana-transaction-error", -] - -[[package]] -name = "solana-epoch-info" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ef6f0b449290b0b9f32973eefd95af35b01c5c0c34c569f936c34c5b20d77b" -dependencies = [ - "serde", - "serde_derive", -] - -[[package]] -name = "solana-epoch-rewards" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b575d3dd323b9ea10bb6fe89bf6bf93e249b215ba8ed7f68f1a3633f384db7" -dependencies = [ - "serde", - "serde_derive", - "solana-hash", - "solana-sdk-ids", - "solana-sdk-macro", - "solana-sysvar-id", -] - -[[package]] -name = "solana-epoch-rewards-hasher" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c5fd2662ae7574810904585fd443545ed2b568dbd304b25a31e79ccc76e81b" -dependencies = [ - "siphasher 0.3.11", - "solana-hash", - "solana-pubkey", -] - -[[package]] -name = "solana-epoch-schedule" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fce071fbddecc55d727b1d7ed16a629afe4f6e4c217bc8d00af3b785f6f67ed" -dependencies = [ - "serde", - "serde_derive", - "solana-sdk-ids", - "solana-sdk-macro", - "solana-sysvar-id", -] - -[[package]] -name = "solana-example-mocks" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84461d56cbb8bb8d539347151e0525b53910102e4bced875d49d5139708e39d3" -dependencies = [ - "serde", - "serde_derive", - "solana-address-lookup-table-interface", - "solana-clock", - "solana-hash", - "solana-instruction", - "solana-keccak-hasher", - "solana-message", - "solana-nonce", - 
"solana-pubkey", - "solana-sdk-ids", - "solana-system-interface", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-faucet" -version = "3.0.0" -dependencies = [ - "bincode", - "clap", - "crossbeam-channel", - "log", - "serde", - "serde_derive", - "solana-clap-utils", - "solana-cli-config", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-logger", - "solana-message", - "solana-metrics", - "solana-native-token", - "solana-packet", - "solana-pubkey", - "solana-signer", - "solana-system-interface", - "solana-system-transaction", - "solana-transaction", - "solana-version", - "spl-memo-interface", - "thiserror 2.0.12", - "tokio", -] - -[[package]] -name = "solana-feature-gate-interface" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f5c5382b449e8e4e3016fb05e418c53d57782d8b5c30aa372fc265654b956d" -dependencies = [ - "bincode", - "serde", - "serde_derive", - "solana-account", - "solana-account-info", - "solana-instruction", - "solana-program-error", - "solana-pubkey", - "solana-rent", - "solana-sdk-ids", - "solana-system-interface", -] - -[[package]] -name = "solana-feature-set" -version = "2.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92f6c09cc41059c0e03ccbee7f5d4cc0a315d68ef0d59b67eb90246adfd8cc35" -dependencies = [ - "ahash 0.8.11", - "lazy_static", - "solana-epoch-schedule", - "solana-hash", - "solana-pubkey", - "solana-sha256-hasher", -] - -[[package]] -name = "solana-fee" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "solana-fee-structure", - "solana-svm-transaction", -] - -[[package]] -name = "solana-fee-calculator" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89bc408da0fb3812bc3008189d148b4d3e08252c79ad810b245482a3f70cd8d" -dependencies = [ - "log", - "serde", - "serde_derive", -] - -[[package]] -name = "solana-fee-structure" -version = "2.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "33adf673581c38e810bf618f745bf31b683a0a4a4377682e6aaac5d9a058dd4e" -dependencies = [ - "serde", - "serde_derive", - "solana-message", - "solana-native-token", -] - -[[package]] -name = "solana-genesis-config" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3725085d47b96d37fef07a29d78d2787fc89a0b9004c66eed7753d1e554989f" -dependencies = [ - "bincode", - "chrono", - "memmap2 0.5.10", - "serde", - "serde_derive", - "solana-account", - "solana-clock", - "solana-cluster-type", - "solana-epoch-schedule", - "solana-fee-calculator", - "solana-hash", - "solana-inflation", - "solana-keypair", - "solana-logger", - "solana-poh-config", - "solana-pubkey", - "solana-rent", - "solana-sdk-ids", - "solana-sha256-hasher", - "solana-shred-version", - "solana-signer", - "solana-time-utils", -] - -[[package]] -name = "solana-geyser-plugin-manager" -version = "3.0.0" -dependencies = [ - "agave-geyser-plugin-interface", - "bs58", - "crossbeam-channel", - "json5", - "jsonrpc-core", - "libloading", - "log", - "serde_json", - "solana-account", - "solana-accounts-db", - "solana-clock", - "solana-entry", - "solana-hash", - "solana-ledger", - "solana-measure", - "solana-metrics", - "solana-pubkey", - "solana-rpc", - "solana-runtime", - "solana-signature", - "solana-transaction", - "solana-transaction-status", - "thiserror 2.0.12", - "tokio", -] - -[[package]] -name = "solana-gossip" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "arrayvec", - "assert_matches", - "bincode", - "bv", - "clap", - "crossbeam-channel", - "flate2", - "indexmap 2.10.0", - "itertools 0.12.1", - "log", - "lru", - "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rayon", - "serde", - "serde-big-array", - "serde_bytes", - "serde_derive", - "siphasher 1.0.1", - "solana-bloom", - "solana-clap-utils", - "solana-client", - "solana-clock", - "solana-connection-cache", - "solana-entry", - 
"solana-epoch-schedule", - "solana-hash", - "solana-keypair", - "solana-ledger", - "solana-logger", - "solana-measure", - "solana-metrics", - "solana-native-token", - "solana-net-utils", - "solana-packet", - "solana-perf", - "solana-pubkey", - "solana-quic-definitions", - "solana-rayon-threadlimit", - "solana-rpc-client", - "solana-runtime", - "solana-sanitize", - "solana-serde-varint", - "solana-sha256-hasher", - "solana-short-vec", - "solana-signature", - "solana-signer", - "solana-streamer", - "solana-time-utils", - "solana-tpu-client", - "solana-transaction", - "solana-version", - "solana-vote", - "solana-vote-program", - "static_assertions", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-hard-forks" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c28371f878e2ead55611d8ba1b5fb879847156d04edea13693700ad1a28baf" -dependencies = [ - "serde", - "serde_derive", -] - -[[package]] -name = "solana-hash" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b96e9f0300fa287b545613f007dfe20043d7812bee255f418c1eb649c93b63" -dependencies = [ - "borsh 1.5.7", - "bytemuck", - "bytemuck_derive", - "five8", - "js-sys", - "serde", - "serde_derive", - "solana-atomic-u64", - "solana-sanitize", - "wasm-bindgen", -] - -[[package]] -name = "solana-inflation" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23eef6a09eb8e568ce6839573e4966850e85e9ce71e6ae1a6c930c1c43947de3" -dependencies = [ - "serde", - "serde_derive", -] - -[[package]] -name = "solana-instruction" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47298e2ce82876b64f71e9d13a46bc4b9056194e7f9937ad3084385befa50885" -dependencies = [ - "bincode", - "borsh 1.5.7", - "getrandom 0.2.15", - "js-sys", - "num-traits", - "serde", - "serde_derive", - "solana-define-syscall", - "solana-pubkey", - "wasm-bindgen", -] - -[[package]] 
-name = "solana-instructions-sysvar" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0e85a6fad5c2d0c4f5b91d34b8ca47118fc593af706e523cdbedf846a954f57" -dependencies = [ - "bitflags 2.9.1", - "solana-account-info", - "solana-instruction", - "solana-program-error", - "solana-pubkey", - "solana-sanitize", - "solana-sdk-ids", - "solana-serialize-utils", - "solana-sysvar-id", -] - -[[package]] -name = "solana-keccak-hasher" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7aeb957fbd42a451b99235df4942d96db7ef678e8d5061ef34c9b34cae12f79" -dependencies = [ - "sha3", - "solana-define-syscall", - "solana-hash", - "solana-sanitize", -] - -[[package]] -name = "solana-keypair" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dbb7042c2e0c561afa07242b2099d55c57bd1b1da3b6476932197d84e15e3e4" -dependencies = [ - "bs58", - "ed25519-dalek", - "ed25519-dalek-bip32", - "rand 0.7.3", - "solana-derivation-path", - "solana-pubkey", - "solana-seed-derivable", - "solana-seed-phrase", - "solana-signature", - "solana-signer", - "wasm-bindgen", -] - -[[package]] -name = "solana-last-restart-slot" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a6360ac2fdc72e7463565cd256eedcf10d7ef0c28a1249d261ec168c1b55cdd" -dependencies = [ - "serde", - "serde_derive", - "solana-sdk-ids", - "solana-sdk-macro", - "solana-sysvar-id", -] - -[[package]] -name = "solana-lattice-hash" -version = "3.0.0" -dependencies = [ - "base64 0.22.1", - "blake3", - "bs58", - "bytemuck", -] - -[[package]] -name = "solana-ledger" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "agave-reserved-account-keys", - "anyhow", - "assert_matches", - "bincode", - "bitflags 2.9.1", - "bzip2", - "chrono", - "chrono-humanize", - "crossbeam-channel", - "dashmap", - "eager", - "fs_extra", - "futures 0.3.31", - "itertools 0.12.1", - 
"lazy-lru", - "libc", - "log", - "lru", - "mockall", - "num_cpus", - "num_enum", - "prost", - "qualifier_attr", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rayon", - "reed-solomon-erasure", - "rocksdb", - "scopeguard", - "serde", - "serde_bytes", - "sha2 0.10.9", - "solana-account", - "solana-account-decoder", - "solana-accounts-db", - "solana-address-lookup-table-interface", - "solana-bpf-loader-program", - "solana-clock", - "solana-cost-model", - "solana-entry", - "solana-epoch-schedule", - "solana-genesis-config", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-measure", - "solana-message", - "solana-metrics", - "solana-native-token", - "solana-net-utils", - "solana-packet", - "solana-perf", - "solana-program-runtime", - "solana-pubkey", - "solana-rayon-threadlimit", - "solana-runtime", - "solana-runtime-transaction", - "solana-seed-derivable", - "solana-sha256-hasher", - "solana-shred-version", - "solana-signature", - "solana-signer", - "solana-stake-interface", - "solana-stake-program", - "solana-storage-bigtable", - "solana-storage-proto", - "solana-streamer", - "solana-svm", - "solana-svm-transaction", - "solana-system-interface", - "solana-system-transaction", - "solana-time-utils", - "solana-timings", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "solana-transaction-status", - "solana-vote", - "solana-vote-program", - "static_assertions", - "strum", - "strum_macros", - "tar", - "tempfile", - "thiserror 2.0.12", - "tokio", - "tokio-stream", - "trees", -] - -[[package]] -name = "solana-loader-v2-interface" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8ab08006dad78ae7cd30df8eea0539e207d08d91eaefb3e1d49a446e1c49654" -dependencies = [ - "serde", - "serde_bytes", - "serde_derive", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", -] - -[[package]] -name = "solana-loader-v3-interface" -version = "3.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4be76cfa9afd84ca2f35ebc09f0da0f0092935ccdac0595d98447f259538c2" -dependencies = [ - "serde", - "serde_bytes", - "serde_derive", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-system-interface", -] - -[[package]] -name = "solana-loader-v3-interface" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f7162a05b8b0773156b443bccd674ea78bb9aa406325b467ea78c06c99a63a2" -dependencies = [ - "serde", - "serde_bytes", - "serde_derive", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-system-interface", -] - -[[package]] -name = "solana-loader-v4-interface" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "706a777242f1f39a83e2a96a2a6cb034cb41169c6ecbee2cf09cb873d9659e7e" -dependencies = [ - "serde", - "serde_bytes", - "serde_derive", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-system-interface", -] - -[[package]] -name = "solana-loader-v4-program" -version = "3.0.0" -dependencies = [ - "log", - "qualifier_attr", - "solana-account", - "solana-bincode", - "solana-bpf-loader-program", - "solana-instruction", - "solana-loader-v3-interface 5.0.0", - "solana-loader-v4-interface", - "solana-log-collector", - "solana-measure", - "solana-packet", - "solana-program-runtime", - "solana-pubkey", - "solana-sbpf", - "solana-sdk-ids", - "solana-transaction-context", - "solana-type-overrides", -] - -[[package]] -name = "solana-log-collector" -version = "3.0.0" -dependencies = [ - "log", -] - -[[package]] -name = "solana-logger" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db8e777ec1afd733939b532a42492d888ec7c88d8b4127a5d867eb45c6eb5cd5" -dependencies = [ - "env_logger", - "lazy_static", - "libc", - "log", - "signal-hook", -] - -[[package]] -name = "solana-measure" -version = "3.0.0" - -[[package]] 
-name = "solana-merkle-tree" -version = "3.0.0" -dependencies = [ - "fast-math", - "solana-hash", - "solana-sha256-hasher", -] - -[[package]] -name = "solana-message" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1796aabce376ff74bf89b78d268fa5e683d7d7a96a0a4e4813ec34de49d5314b" -dependencies = [ - "bincode", - "blake3", - "lazy_static", - "serde", - "serde_derive", - "solana-bincode", - "solana-hash", - "solana-instruction", - "solana-pubkey", - "solana-sanitize", - "solana-sdk-ids", - "solana-short-vec", - "solana-system-interface", - "solana-transaction-error", - "wasm-bindgen", -] - -[[package]] -name = "solana-metrics" -version = "3.0.0" -dependencies = [ - "crossbeam-channel", - "gethostname", - "log", - "reqwest 0.12.22", - "solana-cluster-type", - "solana-sha256-hasher", - "solana-time-utils", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-msg" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36a1a14399afaabc2781a1db09cb14ee4cc4ee5c7a5a3cfcc601811379a8092" -dependencies = [ - "solana-define-syscall", -] - -[[package]] -name = "solana-native-token" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307fb2f78060995979e9b4f68f833623565ed4e55d3725f100454ce78a99a1a3" - -[[package]] -name = "solana-net-utils" -version = "3.0.0" -dependencies = [ - "anyhow", - "bincode", - "bytes", - "itertools 0.12.1", - "log", - "nix", - "rand 0.8.5", - "serde", - "serde_derive", - "socket2 0.6.0", - "solana-serde", - "tokio", - "url 2.5.4", -] - -[[package]] -name = "solana-nohash-hasher" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e" - -[[package]] -name = "solana-nonce" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"703e22eb185537e06204a5bd9d509b948f0066f2d1d814a6f475dafb3ddf1325" -dependencies = [ - "serde", - "serde_derive", - "solana-fee-calculator", - "solana-hash", - "solana-pubkey", - "solana-sha256-hasher", -] - -[[package]] -name = "solana-nonce-account" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde971a20b8dbf60144d6a84439dda86b5466e00e2843091fe731083cda614da" -dependencies = [ - "solana-account", - "solana-hash", - "solana-nonce", - "solana-sdk-ids", -] - -[[package]] -name = "solana-offchain-message" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b526398ade5dea37f1f147ce55dae49aa017a5d7326606359b0445ca8d946581" -dependencies = [ - "num_enum", - "solana-hash", - "solana-packet", - "solana-sanitize", - "solana-sha256-hasher", - "solana-signature", - "solana-signer", -] - -[[package]] -name = "solana-packet" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "004f2d2daf407b3ec1a1ca5ec34b3ccdfd6866dd2d3c7d0715004a96e4b6d127" -dependencies = [ - "bincode", - "bitflags 2.9.1", - "cfg_eval", - "serde", - "serde_derive", - "serde_with", -] - -[[package]] -name = "solana-perf" -version = "3.0.0" -dependencies = [ - "ahash 0.8.11", - "bincode", - "bv", - "bytes", - "caps", - "curve25519-dalek 4.1.3", - "dlopen2", - "fnv", - "libc", - "log", - "nix", - "rand 0.8.5", - "rayon", - "serde", - "solana-hash", - "solana-message", - "solana-metrics", - "solana-packet", - "solana-pubkey", - "solana-rayon-threadlimit", - "solana-sdk-ids", - "solana-short-vec", - "solana-signature", - "solana-time-utils", -] - -[[package]] -name = "solana-poh" -version = "3.0.0" -dependencies = [ - "arc-swap", - "core_affinity", - "crossbeam-channel", - "log", - "qualifier_attr", - "solana-clock", - "solana-entry", - "solana-hash", - "solana-ledger", - "solana-measure", - "solana-metrics", - "solana-poh-config", - "solana-pubkey", - "solana-runtime", - 
"solana-time-utils", - "solana-transaction", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-poh-config" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d650c3b4b9060082ac6b0efbbb66865089c58405bfb45de449f3f2b91eccee75" -dependencies = [ - "serde", - "serde_derive", -] - -[[package]] -name = "solana-poseidon" -version = "3.0.0" -dependencies = [ - "ark-bn254", - "light-poseidon", - "solana-define-syscall", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-precompile-error" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d87b2c1f5de77dfe2b175ee8dd318d196aaca4d0f66f02842f80c852811f9f8" -dependencies = [ - "num-traits", - "solana-decode-error", -] - -[[package]] -name = "solana-presigner" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a57a24e6a4125fc69510b6774cd93402b943191b6cddad05de7281491c90fe" -dependencies = [ - "solana-pubkey", - "solana-signature", - "solana-signer", -] - -[[package]] -name = "solana-program" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "586469467e93ceb79048f8d8e3a619bf61d05396ee7de95cb40280301a589d05" -dependencies = [ - "bincode", - "blake3", - "borsh 0.10.4", - "borsh 1.5.7", - "bs58", - "bytemuck", - "console_error_panic_hook", - "console_log", - "getrandom 0.2.15", - "lazy_static", - "log", - "memoffset", - "num-bigint 0.4.6", - "num-derive", - "num-traits", - "rand 0.8.5", - "serde", - "serde_bytes", - "serde_derive", - "solana-account-info", - "solana-address-lookup-table-interface", - "solana-atomic-u64", - "solana-big-mod-exp", - "solana-bincode", - "solana-blake3-hasher", - "solana-borsh", - "solana-clock", - "solana-cpi", - "solana-decode-error", - "solana-define-syscall", - "solana-epoch-rewards", - "solana-epoch-schedule", - "solana-example-mocks", - "solana-feature-gate-interface", - "solana-fee-calculator", - 
"solana-hash", - "solana-instruction", - "solana-instructions-sysvar", - "solana-keccak-hasher", - "solana-last-restart-slot", - "solana-loader-v2-interface", - "solana-loader-v3-interface 3.0.0", - "solana-loader-v4-interface", - "solana-message", - "solana-msg", - "solana-native-token", - "solana-nonce", - "solana-program-entrypoint", - "solana-program-error", - "solana-program-memory", - "solana-program-option", - "solana-program-pack", - "solana-pubkey", - "solana-rent", - "solana-sanitize", - "solana-sdk-ids", - "solana-sdk-macro", - "solana-secp256k1-recover", - "solana-serde-varint", - "solana-serialize-utils", - "solana-sha256-hasher", - "solana-short-vec", - "solana-slot-hashes", - "solana-slot-history", - "solana-stable-layout", - "solana-stake-interface", - "solana-system-interface", - "solana-sysvar", - "solana-sysvar-id", - "solana-vote-interface", - "thiserror 2.0.12", - "wasm-bindgen", -] - -[[package]] -name = "solana-program-entrypoint" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32ce041b1a0ed275290a5008ee1a4a6c48f5054c8a3d78d313c08958a06aedbd" -dependencies = [ - "solana-account-info", - "solana-msg", - "solana-program-error", - "solana-pubkey", -] - -[[package]] -name = "solana-program-error" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ee2e0217d642e2ea4bee237f37bd61bb02aec60da3647c48ff88f6556ade775" -dependencies = [ - "borsh 1.5.7", - "num-traits", - "serde", - "serde_derive", - "solana-decode-error", - "solana-instruction", - "solana-msg", - "solana-pubkey", -] - -[[package]] -name = "solana-program-memory" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0268f6c89825fb634a34bd0c3b8fdaeaecfc3728be1d622a8ee6dd577b60d4" -dependencies = [ - "num-traits", - "solana-define-syscall", -] - -[[package]] -name = "solana-program-option" -version = "2.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc677a2e9bc616eda6dbdab834d463372b92848b2bfe4a1ed4e4b4adba3397d0" - -[[package]] -name = "solana-program-pack" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "319f0ef15e6e12dc37c597faccb7d62525a509fec5f6975ecb9419efddeb277b" -dependencies = [ - "solana-program-error", -] - -[[package]] -name = "solana-program-runtime" -version = "3.0.0" -dependencies = [ - "base64 0.22.1", - "bincode", - "enum-iterator", - "itertools 0.12.1", - "log", - "percentage", - "rand 0.8.5", - "serde", - "solana-account", - "solana-clock", - "solana-epoch-rewards", - "solana-epoch-schedule", - "solana-fee-structure", - "solana-hash", - "solana-instruction", - "solana-last-restart-slot", - "solana-log-collector", - "solana-measure", - "solana-metrics", - "solana-program-entrypoint", - "solana-pubkey", - "solana-rent", - "solana-sbpf", - "solana-sdk-ids", - "solana-slot-hashes", - "solana-stable-layout", - "solana-svm-callback", - "solana-svm-feature-set", - "solana-svm-transaction", - "solana-system-interface", - "solana-sysvar", - "solana-sysvar-id", - "solana-timings", - "solana-transaction-context", - "solana-type-overrides", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-program-test" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "assert_matches", - "async-trait", - "base64 0.22.1", - "bincode", - "chrono-humanize", - "crossbeam-channel", - "log", - "serde", - "solana-account", - "solana-account-info", - "solana-accounts-db", - "solana-banks-client", - "solana-banks-interface", - "solana-banks-server", - "solana-clock", - "solana-commitment-config", - "solana-compute-budget", - "solana-epoch-rewards", - "solana-epoch-schedule", - "solana-fee-calculator", - "solana-genesis-config", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-loader-v3-interface 5.0.0", - "solana-log-collector", - "solana-logger", - "solana-message", - 
"solana-msg", - "solana-native-token", - "solana-poh-config", - "solana-program-entrypoint", - "solana-program-error", - "solana-program-runtime", - "solana-pubkey", - "solana-rent", - "solana-runtime", - "solana-sbpf", - "solana-sdk-ids", - "solana-signer", - "solana-stable-layout", - "solana-stake-interface", - "solana-svm", - "solana-system-interface", - "solana-sysvar", - "solana-sysvar-id", - "solana-timings", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "solana-vote-program", - "spl-generic-token", - "thiserror 2.0.12", - "tokio", -] - -[[package]] -name = "solana-pubkey" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b62adb9c3261a052ca1f999398c388f1daf558a1b492f60a6d9e64857db4ff1" -dependencies = [ - "borsh 0.10.4", - "borsh 1.5.7", - "bytemuck", - "bytemuck_derive", - "curve25519-dalek 4.1.3", - "five8", - "five8_const", - "getrandom 0.2.15", - "js-sys", - "num-traits", - "rand 0.8.5", - "serde", - "serde_derive", - "solana-atomic-u64", - "solana-decode-error", - "solana-define-syscall", - "solana-sanitize", - "solana-sha256-hasher", - "wasm-bindgen", -] - -[[package]] -name = "solana-pubsub-client" -version = "3.0.0" -dependencies = [ - "crossbeam-channel", - "futures-util", - "http 0.2.12", - "log", - "semver", - "serde", - "serde_derive", - "serde_json", - "solana-account-decoder-client-types", - "solana-clock", - "solana-pubkey", - "solana-rpc-client-types", - "solana-signature", - "thiserror 2.0.12", - "tokio", - "tokio-stream", - "tokio-tungstenite", - "tungstenite", - "url 2.5.4", -] - -[[package]] -name = "solana-quic-client" -version = "3.0.0" -dependencies = [ - "async-lock", - "async-trait", - "futures 0.3.31", - "itertools 0.12.1", - "log", - "quinn", - "quinn-proto", - "rustls 0.23.31", - "solana-connection-cache", - "solana-keypair", - "solana-measure", - "solana-metrics", - "solana-net-utils", - "solana-pubkey", - "solana-quic-definitions", - 
"solana-rpc-client-api", - "solana-signer", - "solana-streamer", - "solana-tls-utils", - "solana-transaction-error", - "thiserror 2.0.12", - "tokio", -] - -[[package]] -name = "solana-quic-definitions" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7011ee2af2baad991762b6d63ea94b08d06f7928effb76ce273b232c9902c205" -dependencies = [ - "solana-keypair", -] - -[[package]] -name = "solana-rayon-threadlimit" -version = "3.0.0" -dependencies = [ - "log", - "num_cpus", -] - -[[package]] -name = "solana-remote-wallet" -version = "3.0.0" -dependencies = [ - "console 0.16.0", - "dialoguer", - "hidapi", - "log", - "num-derive", - "num-traits", - "parking_lot 0.12.3", - "qstring", - "semver", - "solana-derivation-path", - "solana-offchain-message", - "solana-pubkey", - "solana-signature", - "solana-signer", - "thiserror 2.0.12", - "uriparse", -] - -[[package]] -name = "solana-rent" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1aea8fdea9de98ca6e8c2da5827707fb3842833521b528a713810ca685d2480" -dependencies = [ - "serde", - "serde_derive", - "solana-sdk-ids", - "solana-sdk-macro", - "solana-sysvar-id", -] - -[[package]] -name = "solana-rent-collector" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c1e19f5d5108b0d824244425e43bc78bbb9476e2199e979b0230c9f632d3bf4" -dependencies = [ - "serde", - "serde_derive", - "solana-account", - "solana-clock", - "solana-epoch-schedule", - "solana-genesis-config", - "solana-pubkey", - "solana-rent", - "solana-sdk-ids", -] - -[[package]] -name = "solana-reward-info" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18205b69139b1ae0ab8f6e11cdcb627328c0814422ad2482000fa2ca54ae4a2f" -dependencies = [ - "serde", - "serde_derive", -] - -[[package]] -name = "solana-rpc" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "base64 0.22.1", - 
"bincode", - "bs58", - "crossbeam-channel", - "dashmap", - "itertools 0.12.1", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-http-server", - "jsonrpc-pubsub", - "libc", - "log", - "rayon", - "regex", - "serde", - "serde_derive", - "serde_json", - "soketto", - "solana-account", - "solana-account-decoder", - "solana-accounts-db", - "solana-client", - "solana-clock", - "solana-commitment-config", - "solana-entry", - "solana-epoch-info", - "solana-epoch-rewards-hasher", - "solana-epoch-schedule", - "solana-faucet", - "solana-genesis-config", - "solana-gossip", - "solana-hash", - "solana-keypair", - "solana-ledger", - "solana-measure", - "solana-message", - "solana-metrics", - "solana-native-token", - "solana-perf", - "solana-poh", - "solana-poh-config", - "solana-program-pack", - "solana-pubkey", - "solana-quic-definitions", - "solana-rayon-threadlimit", - "solana-rpc-client-api", - "solana-runtime", - "solana-runtime-transaction", - "solana-send-transaction-service", - "solana-signature", - "solana-signer", - "solana-slot-history", - "solana-stake-program", - "solana-storage-bigtable", - "solana-streamer", - "solana-svm", - "solana-system-interface", - "solana-system-transaction", - "solana-sysvar", - "solana-time-utils", - "solana-tpu-client", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "solana-transaction-status", - "solana-validator-exit", - "solana-version", - "solana-vote", - "solana-vote-program", - "spl-generic-token", - "spl-token-2022-interface", - "spl-token-interface", - "stream-cancel", - "thiserror 2.0.12", - "tokio", - "tokio-util 0.7.16", -] - -[[package]] -name = "solana-rpc-client" -version = "3.0.0" -dependencies = [ - "async-trait", - "base64 0.22.1", - "bincode", - "bs58", - "futures 0.3.31", - "indicatif", - "log", - "reqwest 0.12.22", - "reqwest-middleware", - "semver", - "serde", - "serde_derive", - "serde_json", - "solana-account", - "solana-account-decoder-client-types", - 
"solana-clock", - "solana-commitment-config", - "solana-epoch-info", - "solana-epoch-schedule", - "solana-feature-gate-interface", - "solana-hash", - "solana-instruction", - "solana-message", - "solana-pubkey", - "solana-rpc-client-api", - "solana-signature", - "solana-transaction", - "solana-transaction-error", - "solana-transaction-status-client-types", - "solana-version", - "solana-vote-interface", - "tokio", -] - -[[package]] -name = "solana-rpc-client-api" -version = "3.0.0" -dependencies = [ - "anyhow", - "jsonrpc-core", - "reqwest 0.12.22", - "reqwest-middleware", - "serde", - "serde_derive", - "serde_json", - "solana-account-decoder-client-types", - "solana-clock", - "solana-rpc-client-types", - "solana-signer", - "solana-transaction-error", - "solana-transaction-status-client-types", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-rpc-client-nonce-utils" -version = "3.0.0" -dependencies = [ - "solana-account", - "solana-commitment-config", - "solana-hash", - "solana-message", - "solana-nonce", - "solana-pubkey", - "solana-rpc-client", - "solana-sdk-ids", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-rpc-client-types" -version = "3.0.0" -dependencies = [ - "base64 0.22.1", - "bs58", - "semver", - "serde", - "serde_derive", - "serde_json", - "solana-account", - "solana-account-decoder-client-types", - "solana-clock", - "solana-commitment-config", - "solana-fee-calculator", - "solana-inflation", - "solana-pubkey", - "solana-transaction-error", - "solana-transaction-status-client-types", - "solana-version", - "spl-generic-token", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-runtime" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "agave-precompiles", - "agave-reserved-account-keys", - "agave-syscalls", - "ahash 0.8.11", - "aquamarine", - "arc-swap", - "arrayref", - "assert_matches", - "base64 0.22.1", - "bincode", - "blake3", - "bv", - "bytemuck", - "crossbeam-channel", - "dashmap", - "dir-diff", - "fnv", - "im", - 
"itertools 0.12.1", - "libc", - "log", - "lz4", - "memmap2 0.9.7", - "mockall", - "modular-bitfield", - "num-derive", - "num-traits", - "num_cpus", - "num_enum", - "percentage", - "qualifier_attr", - "rand 0.8.5", - "rayon", - "regex", - "serde", - "serde_derive", - "serde_json", - "serde_with", - "solana-account", - "solana-account-info", - "solana-accounts-db", - "solana-address-lookup-table-interface", - "solana-bpf-loader-program", - "solana-bucket-map", - "solana-builtins", - "solana-client-traits", - "solana-clock", - "solana-commitment-config", - "solana-compute-budget", - "solana-compute-budget-instruction", - "solana-compute-budget-interface", - "solana-cost-model", - "solana-cpi", - "solana-ed25519-program", - "solana-epoch-info", - "solana-epoch-rewards-hasher", - "solana-epoch-schedule", - "solana-feature-gate-interface", - "solana-fee", - "solana-fee-calculator", - "solana-fee-structure", - "solana-genesis-config", - "solana-hard-forks", - "solana-hash", - "solana-inflation", - "solana-instruction", - "solana-keypair", - "solana-lattice-hash", - "solana-loader-v3-interface 5.0.0", - "solana-loader-v4-interface", - "solana-measure", - "solana-message", - "solana-metrics", - "solana-native-token", - "solana-nohash-hasher", - "solana-nonce", - "solana-nonce-account", - "solana-packet", - "solana-perf", - "solana-poh-config", - "solana-precompile-error", - "solana-program-runtime", - "solana-pubkey", - "solana-rayon-threadlimit", - "solana-rent", - "solana-rent-collector", - "solana-reward-info", - "solana-runtime-transaction", - "solana-sdk-ids", - "solana-secp256k1-program", - "solana-seed-derivable", - "solana-serde", - "solana-sha256-hasher", - "solana-signature", - "solana-signer", - "solana-slot-hashes", - "solana-slot-history", - "solana-stake-interface", - "solana-stake-program", - "solana-svm", - "solana-svm-callback", - "solana-svm-transaction", - "solana-system-interface", - "solana-system-transaction", - "solana-sysvar", - "solana-sysvar-id", - 
"solana-time-utils", - "solana-timings", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "solana-transaction-status-client-types", - "solana-unified-scheduler-logic", - "solana-version", - "solana-vote", - "solana-vote-interface", - "solana-vote-program", - "spl-generic-token", - "static_assertions", - "strum", - "strum_macros", - "symlink", - "tar", - "tempfile", - "thiserror 2.0.12", - "zstd", -] - -[[package]] -name = "solana-runtime-transaction" -version = "3.0.0" -dependencies = [ - "agave-transaction-view", - "log", - "solana-compute-budget", - "solana-compute-budget-instruction", - "solana-hash", - "solana-message", - "solana-pubkey", - "solana-sdk-ids", - "solana-signature", - "solana-svm-transaction", - "solana-transaction", - "solana-transaction-error", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-sanitize" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f1bc1357b8188d9c4a3af3fc55276e56987265eb7ad073ae6f8180ee54cecf" - -[[package]] -name = "solana-sbpf" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c7a3d3cff34df928b804917bf111d3ede779af406703580cd7ed8fb239f5acf" -dependencies = [ - "byteorder", - "combine 3.8.1", - "hash32", - "libc", - "log", - "rand 0.8.5", - "rustc-demangle", - "thiserror 2.0.12", - "winapi 0.3.9", -] - -[[package]] -name = "solana-sdk-ids" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5d8b9cc68d5c88b062a33e23a6466722467dde0035152d8fb1afbcdf350a5f" -dependencies = [ - "solana-pubkey", -] - -[[package]] -name = "solana-sdk-macro" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86280da8b99d03560f6ab5aca9de2e38805681df34e0bb8f238e69b29433b9df" -dependencies = [ - "bs58", - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "solana-secp256k1-program" -version = 
"2.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f19833e4bc21558fe9ec61f239553abe7d05224347b57d65c2218aeeb82d6149" -dependencies = [ - "digest 0.10.7", - "libsecp256k1", - "serde", - "serde_derive", - "sha3", - "solana-signature", -] - -[[package]] -name = "solana-secp256k1-recover" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baa3120b6cdaa270f39444f5093a90a7b03d296d362878f7a6991d6de3bbe496" -dependencies = [ - "libsecp256k1", - "solana-define-syscall", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-secp256r1-program" -version = "2.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce0ae46da3071a900f02d367d99b2f3058fe2e90c5062ac50c4f20cfedad8f0f" -dependencies = [ - "bytemuck", - "openssl", - "solana-feature-set", - "solana-instruction", - "solana-precompile-error", - "solana-sdk-ids", -] - -[[package]] -name = "solana-seed-derivable" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3beb82b5adb266c6ea90e5cf3967235644848eac476c5a1f2f9283a143b7c97f" -dependencies = [ - "solana-derivation-path", -] - -[[package]] -name = "solana-seed-phrase" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36187af2324f079f65a675ec22b31c24919cb4ac22c79472e85d819db9bbbc15" -dependencies = [ - "hmac 0.12.1", - "pbkdf2 0.11.0", - "sha2 0.10.9", -] - -[[package]] -name = "solana-send-transaction-service" -version = "3.0.0" -dependencies = [ - "async-trait", - "crossbeam-channel", - "itertools 0.12.1", - "log", - "solana-client", - "solana-clock", - "solana-connection-cache", - "solana-hash", - "solana-keypair", - "solana-measure", - "solana-metrics", - "solana-nonce-account", - "solana-pubkey", - "solana-quic-definitions", - "solana-runtime", - "solana-signature", - "solana-time-utils", - "solana-tpu-client-next", - "tokio", - "tokio-util 0.7.16", -] - 
-[[package]] -name = "solana-serde" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1931484a408af466e14171556a47adaa215953c7f48b24e5f6b0282763818b04" -dependencies = [ - "serde", -] - -[[package]] -name = "solana-serde-varint" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a7e155eba458ecfb0107b98236088c3764a09ddf0201ec29e52a0be40857113" -dependencies = [ - "serde", -] - -[[package]] -name = "solana-serialize-utils" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "817a284b63197d2b27afdba829c5ab34231da4a9b4e763466a003c40ca4f535e" -dependencies = [ - "solana-instruction", - "solana-pubkey", - "solana-sanitize", -] - -[[package]] -name = "solana-sha256-hasher" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa3feb32c28765f6aa1ce8f3feac30936f16c5c3f7eb73d63a5b8f6f8ecdc44" -dependencies = [ - "sha2 0.10.9", - "solana-define-syscall", - "solana-hash", -] - -[[package]] -name = "solana-short-vec" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c54c66f19b9766a56fa0057d060de8378676cb64987533fa088861858fc5a69" -dependencies = [ - "serde", -] - -[[package]] -name = "solana-shred-version" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afd3db0461089d1ad1a78d9ba3f15b563899ca2386351d38428faa5350c60a98" -dependencies = [ - "solana-hard-forks", - "solana-hash", - "solana-sha256-hasher", -] - -[[package]] -name = "solana-signature" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c8ec8e657aecfc187522fc67495142c12f35e55ddeca8698edbb738b8dbd8c" -dependencies = [ - "ed25519-dalek", - "five8", - "serde", - "serde-big-array", - "serde_derive", - "solana-sanitize", -] - -[[package]] -name = "solana-signer" -version = "2.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c41991508a4b02f021c1342ba00bcfa098630b213726ceadc7cb032e051975b" -dependencies = [ - "solana-pubkey", - "solana-signature", - "solana-transaction-error", -] - -[[package]] -name = "solana-slot-hashes" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8691982114513763e88d04094c9caa0376b867a29577939011331134c301ce" -dependencies = [ - "serde", - "serde_derive", - "solana-hash", - "solana-sdk-ids", - "solana-sysvar-id", -] - -[[package]] -name = "solana-slot-history" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ccc1b2067ca22754d5283afb2b0126d61eae734fc616d23871b0943b0d935e" -dependencies = [ - "bv", - "serde", - "serde_derive", - "solana-sdk-ids", - "solana-sysvar-id", -] - -[[package]] -name = "solana-stable-layout" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f14f7d02af8f2bc1b5efeeae71bc1c2b7f0f65cd75bcc7d8180f2c762a57f54" -dependencies = [ - "solana-instruction", - "solana-pubkey", -] - -[[package]] -name = "solana-stake-interface" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5269e89fde216b4d7e1d1739cf5303f8398a1ff372a81232abbee80e554a838c" -dependencies = [ - "borsh 0.10.4", - "borsh 1.5.7", - "num-traits", - "serde", - "serde_derive", - "solana-clock", - "solana-cpi", - "solana-decode-error", - "solana-instruction", - "solana-program-error", - "solana-pubkey", - "solana-system-interface", - "solana-sysvar-id", -] - -[[package]] -name = "solana-stake-program" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "bincode", - "log", - "solana-account", - "solana-bincode", - "solana-clock", - "solana-config-program-client", - "solana-genesis-config", - "solana-instruction", - "solana-log-collector", - "solana-native-token", - "solana-packet", - "solana-program-runtime", - 
"solana-pubkey", - "solana-rent", - "solana-sdk-ids", - "solana-stake-interface", - "solana-sysvar", - "solana-transaction-context", - "solana-type-overrides", - "solana-vote-interface", -] - -[[package]] -name = "solana-storage-bigtable" -version = "3.0.0" -dependencies = [ - "agave-reserved-account-keys", - "backoff", - "bincode", - "bytes", - "bzip2", - "enum-iterator", - "flate2", - "futures 0.3.31", - "goauth", - "http 0.2.12", - "hyper 0.14.32", - "hyper-proxy", - "log", - "openssl", - "prost", - "prost-types", - "serde", - "serde_derive", - "smpl_jwt", - "solana-clock", - "solana-message", - "solana-metrics", - "solana-pubkey", - "solana-serde", - "solana-signature", - "solana-storage-proto", - "solana-time-utils", - "solana-transaction", - "solana-transaction-error", - "solana-transaction-status", - "thiserror 2.0.12", - "tokio", - "tonic", - "zstd", -] - -[[package]] -name = "solana-storage-proto" -version = "3.0.0" -dependencies = [ - "bincode", - "bs58", - "prost", - "protobuf-src", - "serde", - "solana-account-decoder", - "solana-hash", - "solana-instruction", - "solana-message", - "solana-pubkey", - "solana-serde", - "solana-signature", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "solana-transaction-status", - "tonic-build", -] - -[[package]] -name = "solana-streamer" -version = "3.0.0" -dependencies = [ - "arc-swap", - "async-channel", - "bytes", - "crossbeam-channel", - "dashmap", - "futures 0.3.31", - "futures-util", - "governor", - "histogram", - "indexmap 2.10.0", - "itertools 0.12.1", - "libc", - "log", - "nix", - "num_cpus", - "pem", - "percentage", - "quinn", - "quinn-proto", - "rand 0.8.5", - "rustls 0.23.31", - "smallvec", - "socket2 0.6.0", - "solana-keypair", - "solana-measure", - "solana-metrics", - "solana-net-utils", - "solana-packet", - "solana-perf", - "solana-pubkey", - "solana-quic-definitions", - "solana-signature", - "solana-signer", - "solana-time-utils", - "solana-tls-utils", - 
"solana-transaction-error", - "solana-transaction-metrics-tracker", - "thiserror 2.0.12", - "tokio", - "tokio-util 0.7.16", - "x509-parser", -] - -[[package]] -name = "solana-svm" -version = "3.0.0" -dependencies = [ - "ahash 0.8.11", - "itertools 0.12.1", - "log", - "percentage", - "serde", - "serde_derive", - "solana-account", - "solana-clock", - "solana-fee-structure", - "solana-hash", - "solana-instruction", - "solana-instructions-sysvar", - "solana-loader-v3-interface 5.0.0", - "solana-loader-v4-interface", - "solana-loader-v4-program", - "solana-log-collector", - "solana-measure", - "solana-message", - "solana-nonce", - "solana-nonce-account", - "solana-program-entrypoint", - "solana-program-pack", - "solana-program-runtime", - "solana-pubkey", - "solana-rent", - "solana-rent-collector", - "solana-sdk-ids", - "solana-slot-hashes", - "solana-svm-callback", - "solana-svm-feature-set", - "solana-svm-transaction", - "solana-system-interface", - "solana-sysvar-id", - "solana-timings", - "solana-transaction-context", - "solana-transaction-error", - "solana-type-overrides", - "spl-generic-token", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-svm-callback" -version = "3.0.0" -dependencies = [ - "solana-account", - "solana-precompile-error", - "solana-pubkey", -] - -[[package]] -name = "solana-svm-example-paytube" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "agave-syscalls", - "solana-account", - "solana-bpf-loader-program", - "solana-client", - "solana-clock", - "solana-commitment-config", - "solana-compute-budget", - "solana-epoch-schedule", - "solana-fee-structure", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-logger", - "solana-program-pack", - "solana-program-runtime", - "solana-pubkey", - "solana-rent", - "solana-sdk-ids", - "solana-signer", - "solana-svm", - "solana-svm-callback", - "solana-svm-feature-set", - "solana-system-interface", - "solana-system-program", - "solana-test-validator", - 
"solana-transaction", - "solana-transaction-error", - "spl-associated-token-account-interface", - "spl-token-interface", - "termcolor", -] - -[[package]] -name = "solana-svm-feature-set" -version = "3.0.0" - -[[package]] -name = "solana-svm-transaction" -version = "3.0.0" -dependencies = [ - "solana-hash", - "solana-message", - "solana-pubkey", - "solana-sdk-ids", - "solana-signature", - "solana-transaction", -] - -[[package]] -name = "solana-system-interface" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7c18cb1a91c6be5f5a8ac9276a1d7c737e39a21beba9ea710ab4b9c63bc90" -dependencies = [ - "js-sys", - "num-traits", - "serde", - "serde_derive", - "solana-decode-error", - "solana-instruction", - "solana-pubkey", - "wasm-bindgen", -] - -[[package]] -name = "solana-system-program" -version = "3.0.0" -dependencies = [ - "bincode", - "log", - "serde", - "serde_derive", - "solana-account", - "solana-bincode", - "solana-fee-calculator", - "solana-instruction", - "solana-log-collector", - "solana-nonce", - "solana-nonce-account", - "solana-packet", - "solana-program-runtime", - "solana-pubkey", - "solana-sdk-ids", - "solana-system-interface", - "solana-sysvar", - "solana-transaction-context", - "solana-type-overrides", -] - -[[package]] -name = "solana-system-transaction" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd98a25e5bcba8b6be8bcbb7b84b24c2a6a8178d7fb0e3077a916855ceba91a" -dependencies = [ - "solana-hash", - "solana-keypair", - "solana-message", - "solana-pubkey", - "solana-signer", - "solana-system-interface", - "solana-transaction", -] - -[[package]] -name = "solana-sysvar" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50c92bc019c590f5e42c61939676e18d14809ed00b2a59695dd5c67ae72c097" -dependencies = [ - "base64 0.22.1", - "bincode", - "bytemuck", - "bytemuck_derive", - "lazy_static", - "serde", - 
"serde_derive", - "solana-account-info", - "solana-clock", - "solana-define-syscall", - "solana-epoch-rewards", - "solana-epoch-schedule", - "solana-fee-calculator", - "solana-hash", - "solana-instruction", - "solana-instructions-sysvar", - "solana-last-restart-slot", - "solana-program-entrypoint", - "solana-program-error", - "solana-program-memory", - "solana-pubkey", - "solana-rent", - "solana-sanitize", - "solana-sdk-ids", - "solana-sdk-macro", - "solana-slot-hashes", - "solana-slot-history", - "solana-stake-interface", - "solana-sysvar-id", -] - -[[package]] -name = "solana-sysvar-id" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5762b273d3325b047cfda250787f8d796d781746860d5d0a746ee29f3e8812c1" -dependencies = [ - "solana-pubkey", - "solana-sdk-ids", -] - -[[package]] -name = "solana-test-validator" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "base64 0.22.1", - "bincode", - "crossbeam-channel", - "log", - "serde_derive", - "serde_json", - "solana-account", - "solana-accounts-db", - "solana-cli-output", - "solana-clock", - "solana-cluster-type", - "solana-commitment-config", - "solana-compute-budget", - "solana-core", - "solana-epoch-schedule", - "solana-feature-gate-interface", - "solana-fee-calculator", - "solana-geyser-plugin-manager", - "solana-gossip", - "solana-inflation", - "solana-instruction", - "solana-keypair", - "solana-ledger", - "solana-loader-v3-interface 5.0.0", - "solana-logger", - "solana-message", - "solana-native-token", - "solana-net-utils", - "solana-program-test", - "solana-pubkey", - "solana-rent", - "solana-rpc", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-runtime", - "solana-sdk-ids", - "solana-signer", - "solana-streamer", - "solana-tpu-client", - "solana-transaction", - "solana-validator-exit", - "tokio", -] - -[[package]] -name = "solana-time-utils" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6af261afb0e8c39252a04d026e3ea9c405342b08c871a2ad8aa5448e068c784c" - -[[package]] -name = "solana-timings" -version = "3.0.0" -dependencies = [ - "eager", - "enum-iterator", - "solana-pubkey", -] - -[[package]] -name = "solana-tls-utils" -version = "3.0.0" -dependencies = [ - "rustls 0.23.31", - "solana-keypair", - "solana-pubkey", - "solana-signer", - "x509-parser", -] - -[[package]] -name = "solana-tpu-client" -version = "3.0.0" -dependencies = [ - "async-trait", - "bincode", - "futures-util", - "indexmap 2.10.0", - "indicatif", - "log", - "rayon", - "solana-client-traits", - "solana-clock", - "solana-commitment-config", - "solana-connection-cache", - "solana-epoch-schedule", - "solana-measure", - "solana-message", - "solana-net-utils", - "solana-pubkey", - "solana-pubsub-client", - "solana-quic-definitions", - "solana-rpc-client", - "solana-rpc-client-api", - "solana-signature", - "solana-signer", - "solana-transaction", - "solana-transaction-error", - "thiserror 2.0.12", - "tokio", -] - -[[package]] -name = "solana-tpu-client-next" -version = "3.0.0" -dependencies = [ - "async-trait", - "log", - "lru", - "quinn", - "rustls 0.23.31", - "solana-clock", - "solana-connection-cache", - "solana-keypair", - "solana-measure", - "solana-metrics", - "solana-quic-definitions", - "solana-rpc-client", - "solana-streamer", - "solana-time-utils", - "solana-tls-utils", - "solana-tpu-client", - "thiserror 2.0.12", - "tokio", - "tokio-util 0.7.16", -] - -[[package]] -name = "solana-transaction" -version = "2.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80657d6088f721148f5d889c828ca60c7daeedac9a8679f9ec215e0c42bcbf41" -dependencies = [ - "bincode", - "serde", - "serde_derive", - "solana-bincode", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-message", - "solana-pubkey", - "solana-sanitize", - "solana-sdk-ids", - "solana-short-vec", - "solana-signature", - "solana-signer", - "solana-system-interface", - 
"solana-transaction-error", - "wasm-bindgen", -] - -[[package]] -name = "solana-transaction-context" -version = "3.0.0" -dependencies = [ - "bincode", - "serde", - "serde_derive", - "solana-account", - "solana-instruction", - "solana-instructions-sysvar", - "solana-pubkey", - "solana-rent", - "solana-sbpf", - "solana-sdk-ids", -] - -[[package]] -name = "solana-transaction-error" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a9dc8fdb61c6088baab34fc3a8b8473a03a7a5fd404ed8dd502fa79b67cb1" -dependencies = [ - "serde", - "serde_derive", - "solana-instruction", - "solana-sanitize", -] - -[[package]] -name = "solana-transaction-metrics-tracker" -version = "3.0.0" -dependencies = [ - "base64 0.22.1", - "bincode", - "log", - "rand 0.8.5", - "solana-packet", - "solana-perf", - "solana-short-vec", - "solana-signature", -] - -[[package]] -name = "solana-transaction-status" -version = "3.0.0" -dependencies = [ - "Inflector", - "agave-reserved-account-keys", - "base64 0.22.1", - "bincode", - "borsh 1.5.7", - "bs58", - "log", - "serde", - "serde_derive", - "serde_json", - "solana-account-decoder", - "solana-address-lookup-table-interface", - "solana-clock", - "solana-hash", - "solana-instruction", - "solana-loader-v2-interface", - "solana-loader-v3-interface 5.0.0", - "solana-message", - "solana-program-option", - "solana-pubkey", - "solana-reward-info", - "solana-sdk-ids", - "solana-signature", - "solana-stake-interface", - "solana-system-interface", - "solana-transaction", - "solana-transaction-error", - "solana-transaction-status-client-types", - "solana-vote-interface", - "spl-associated-token-account-interface", - "spl-memo-interface", - "spl-token-2022-interface", - "spl-token-group-interface", - "spl-token-interface", - "spl-token-metadata-interface", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-transaction-status-client-types" -version = "3.0.0" -dependencies = [ - "base64 0.22.1", - "bincode", - "bs58", - 
"serde", - "serde_derive", - "serde_json", - "solana-account-decoder-client-types", - "solana-commitment-config", - "solana-instruction", - "solana-message", - "solana-pubkey", - "solana-reward-info", - "solana-signature", - "solana-transaction", - "solana-transaction-context", - "solana-transaction-error", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-turbine" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "agave-xdp", - "bincode", - "bytes", - "caps", - "crossbeam-channel", - "futures 0.3.31", - "itertools 0.12.1", - "lazy-lru", - "log", - "lru", - "quinn", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rayon", - "rustls 0.23.31", - "solana-clock", - "solana-cluster-type", - "solana-entry", - "solana-gossip", - "solana-hash", - "solana-keypair", - "solana-ledger", - "solana-measure", - "solana-metrics", - "solana-native-token", - "solana-net-utils", - "solana-perf", - "solana-poh", - "solana-pubkey", - "solana-quic-client", - "solana-rayon-threadlimit", - "solana-rpc", - "solana-rpc-client-api", - "solana-runtime", - "solana-signature", - "solana-signer", - "solana-streamer", - "solana-system-transaction", - "solana-time-utils", - "solana-tls-utils", - "solana-transaction-error", - "static_assertions", - "thiserror 2.0.12", - "tokio", -] - -[[package]] -name = "solana-type-overrides" -version = "3.0.0" -dependencies = [ - "rand 0.8.5", -] - -[[package]] -name = "solana-udp-client" -version = "3.0.0" -dependencies = [ - "async-trait", - "solana-connection-cache", - "solana-keypair", - "solana-net-utils", - "solana-streamer", - "solana-transaction-error", - "thiserror 2.0.12", - "tokio", -] - -[[package]] -name = "solana-unified-scheduler-logic" -version = "3.0.0" -dependencies = [ - "assert_matches", - "solana-pubkey", - "solana-runtime-transaction", - "solana-transaction", - "static_assertions", - "unwrap_none", -] - -[[package]] -name = "solana-unified-scheduler-pool" -version = "3.0.0" -dependencies = [ - "agave-banking-stage-ingress-types", 
- "aquamarine", - "assert_matches", - "crossbeam-channel", - "dashmap", - "derive-where", - "derive_more 1.0.0", - "dyn-clone", - "log", - "qualifier_attr", - "scopeguard", - "solana-clock", - "solana-cost-model", - "solana-ledger", - "solana-metrics", - "solana-poh", - "solana-pubkey", - "solana-runtime", - "solana-runtime-transaction", - "solana-svm", - "solana-timings", - "solana-transaction", - "solana-transaction-error", - "solana-unified-scheduler-logic", - "static_assertions", - "trait-set", - "unwrap_none", - "vec_extract_if_polyfill", -] - -[[package]] -name = "solana-validator-exit" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bbf6d7a3c0b28dd5335c52c0e9eae49d0ae489a8f324917faf0ded65a812c1d" - -[[package]] -name = "solana-version" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "rand 0.8.5", - "semver", - "serde", - "serde_derive", - "solana-sanitize", - "solana-serde-varint", -] - -[[package]] -name = "solana-vote" -version = "3.0.0" -dependencies = [ - "itertools 0.12.1", - "log", - "serde", - "serde_derive", - "solana-account", - "solana-bincode", - "solana-clock", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-packet", - "solana-pubkey", - "solana-sdk-ids", - "solana-serialize-utils", - "solana-signature", - "solana-signer", - "solana-svm-transaction", - "solana-transaction", - "solana-vote-interface", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-vote-interface" -version = "2.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b80d57478d6599d30acc31cc5ae7f93ec2361a06aefe8ea79bc81739a08af4c3" -dependencies = [ - "bincode", - "num-derive", - "num-traits", - "serde", - "serde_derive", - "solana-clock", - "solana-decode-error", - "solana-hash", - "solana-instruction", - "solana-pubkey", - "solana-rent", - "solana-sdk-ids", - "solana-serde-varint", - "solana-serialize-utils", - "solana-short-vec", - 
"solana-system-interface", -] - -[[package]] -name = "solana-vote-program" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "bincode", - "log", - "num-derive", - "num-traits", - "serde", - "serde_derive", - "solana-account", - "solana-bincode", - "solana-clock", - "solana-epoch-schedule", - "solana-hash", - "solana-instruction", - "solana-keypair", - "solana-packet", - "solana-program-runtime", - "solana-pubkey", - "solana-rent", - "solana-sdk-ids", - "solana-signer", - "solana-slot-hashes", - "solana-transaction", - "solana-transaction-context", - "solana-vote-interface", - "thiserror 2.0.12", -] - -[[package]] -name = "solana-wen-restart" -version = "3.0.0" -dependencies = [ - "anyhow", - "log", - "prost", - "prost-build", - "prost-types", - "protobuf-src", - "rayon", - "solana-clock", - "solana-entry", - "solana-gossip", - "solana-hash", - "solana-ledger", - "solana-pubkey", - "solana-runtime", - "solana-shred-version", - "solana-time-utils", - "solana-timings", - "solana-vote", - "solana-vote-interface", - "solana-vote-program", -] - -[[package]] -name = "solana-zk-elgamal-proof-program" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "bytemuck", - "num-derive", - "num-traits", - "solana-instruction", - "solana-log-collector", - "solana-program-runtime", - "solana-sdk-ids", - "solana-zk-sdk 3.0.0", -] - -[[package]] -name = "solana-zk-sdk" -version = "2.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05857892ac50fe03c125d8445fd790c6768015b76f4ad1e4b4b1499938b357f0" -dependencies = [ - "aes-gcm-siv", - "base64 0.22.1", - "bincode", - "bytemuck", - "bytemuck_derive", - "curve25519-dalek 4.1.3", - "itertools 0.12.1", - "js-sys", - "merlin", - "num-derive", - "num-traits", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "sha3", - "solana-derivation-path", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-seed-derivable", - "solana-seed-phrase", - "solana-signature", 
- "solana-signer", - "subtle", - "thiserror 2.0.12", - "wasm-bindgen", - "zeroize", -] - -[[package]] -name = "solana-zk-sdk" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dffbd0b7537f4249d69b74c632f8eac1d2726572022791f9ead65a67d3f6905" -dependencies = [ - "aes-gcm-siv", - "base64 0.22.1", - "bincode", - "bytemuck", - "bytemuck_derive", - "curve25519-dalek 4.1.3", - "itertools 0.12.1", - "js-sys", - "merlin", - "num-derive", - "num-traits", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "sha3", - "solana-derivation-path", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-seed-derivable", - "solana-seed-phrase", - "solana-signature", - "solana-signer", - "subtle", - "thiserror 2.0.12", - "wasm-bindgen", - "zeroize", -] - -[[package]] -name = "solana-zk-token-proof-program" -version = "3.0.0" -dependencies = [ - "agave-feature-set", - "bytemuck", - "num-derive", - "num-traits", - "solana-instruction", - "solana-log-collector", - "solana-program-runtime", - "solana-sdk-ids", - "solana-zk-token-sdk", -] - -[[package]] -name = "solana-zk-token-sdk" -version = "3.0.0" -dependencies = [ - "aes-gcm-siv", - "base64 0.22.1", - "bincode", - "bytemuck", - "bytemuck_derive", - "curve25519-dalek 4.1.3", - "itertools 0.12.1", - "merlin", - "num-derive", - "num-traits", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "sha3", - "solana-curve25519 3.0.0", - "solana-derivation-path", - "solana-instruction", - "solana-pubkey", - "solana-sdk-ids", - "solana-seed-derivable", - "solana-seed-phrase", - "solana-signature", - "solana-signer", - "subtle", - "thiserror 2.0.12", - "zeroize", -] - -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - -[[package]] -name = "spinning_top" -version = "0.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" -dependencies = [ - "lock_api", -] - -[[package]] -name = "spl-associated-token-account-interface" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6bbe0794e532ac08428d3abf5bf8ae75bd81dfddd785c388e326c00c92c6f5" -dependencies = [ - "borsh 1.5.7", - "solana-instruction", - "solana-pubkey", -] - -[[package]] -name = "spl-discriminator" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7398da23554a31660f17718164e31d31900956054f54f52d5ec1be51cb4f4b3" -dependencies = [ - "bytemuck", - "solana-program-error", - "solana-sha256-hasher", - "spl-discriminator-derive", -] - -[[package]] -name = "spl-discriminator-derive" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" -dependencies = [ - "quote", - "spl-discriminator-syn", - "syn 2.0.96", -] - -[[package]] -name = "spl-discriminator-syn" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f05593b7ca9eac7caca309720f2eafb96355e037e6d373b909a80fe7b69b9" -dependencies = [ - "proc-macro2", - "quote", - "sha2 0.10.9", - "syn 2.0.96", - "thiserror 1.0.69", -] - -[[package]] -name = "spl-generic-token" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "741a62a566d97c58d33f9ed32337ceedd4e35109a686e31b1866c5dfa56abddc" -dependencies = [ - "bytemuck", - "solana-pubkey", -] - -[[package]] -name = "spl-memo-interface" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24af0730130fea732616be9425fe8eb77782e2aab2f0e76837b6a66aaba96c6b" -dependencies = [ - "solana-instruction", - "solana-pubkey", -] - -[[package]] -name = "spl-pod" -version = "0.5.1" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d994afaf86b779104b4a95ba9ca75b8ced3fdb17ee934e38cb69e72afbe17799" -dependencies = [ - "borsh 1.5.7", - "bytemuck", - "bytemuck_derive", - "num-derive", - "num-traits", - "solana-decode-error", - "solana-msg", - "solana-program-error", - "solana-program-option", - "solana-pubkey", - "solana-zk-sdk 2.3.6", - "thiserror 2.0.12", -] - -[[package]] -name = "spl-token-2022-interface" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d7ae2ee6b856f8ddcbdc3b3a9f4d2141582bbe150f93e5298ee97e0251fa04" -dependencies = [ - "arrayref", - "bytemuck", - "num-derive", - "num-traits", - "num_enum", - "solana-account-info", - "solana-decode-error", - "solana-instruction", - "solana-msg", - "solana-program-error", - "solana-program-option", - "solana-program-pack", - "solana-pubkey", - "solana-sdk-ids", - "solana-zk-sdk 2.3.6", - "spl-pod", - "spl-token-confidential-transfer-proof-extraction", - "spl-token-confidential-transfer-proof-generation", - "spl-token-group-interface", - "spl-token-metadata-interface", - "spl-type-length-value", - "thiserror 2.0.12", -] - -[[package]] -name = "spl-token-confidential-transfer-proof-extraction" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bedc4675c80409a004da46978674e4073c65c4b1c611bf33d120381edeffe036" -dependencies = [ - "bytemuck", - "solana-account-info", - "solana-curve25519 2.2.15", - "solana-instruction", - "solana-instructions-sysvar", - "solana-msg", - "solana-program-error", - "solana-pubkey", - "solana-sdk-ids", - "solana-zk-sdk 2.3.6", - "spl-pod", - "thiserror 2.0.12", -] - -[[package]] -name = "spl-token-confidential-transfer-proof-generation" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae5b124840d4aed474cef101d946a798b806b46a509ee4df91021e1ab1cef3ef" -dependencies = [ - "curve25519-dalek 4.1.3", - 
"solana-zk-sdk 2.3.6", - "thiserror 2.0.12", -] - -[[package]] -name = "spl-token-group-interface" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5597b4cd76f85ce7cd206045b7dc22da8c25516573d42d267c8d1fd128db5129" -dependencies = [ - "bytemuck", - "num-derive", - "num-traits", - "solana-decode-error", - "solana-instruction", - "solana-msg", - "solana-program-error", - "solana-pubkey", - "spl-discriminator", - "spl-pod", - "thiserror 2.0.12", -] - -[[package]] -name = "spl-token-interface" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e0c2d4e38ef5834cf7fb1b592b8a8c6eab8485f5ac7a04a151b502c63a0aaa" -dependencies = [ - "arrayref", - "bytemuck", - "num-derive", - "num-traits", - "num_enum", - "solana-instruction", - "solana-program-error", - "solana-program-option", - "solana-program-pack", - "solana-pubkey", - "solana-sdk-ids", - "thiserror 2.0.12", -] - -[[package]] -name = "spl-token-metadata-interface" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "304d6e06f0de0c13a621464b1fd5d4b1bebf60d15ca71a44d3839958e0da16ee" -dependencies = [ - "borsh 1.5.7", - "num-derive", - "num-traits", - "solana-borsh", - "solana-decode-error", - "solana-instruction", - "solana-msg", - "solana-program-error", - "solana-pubkey", - "spl-discriminator", - "spl-pod", - "spl-type-length-value", - "thiserror 2.0.12", -] - -[[package]] -name = "spl-type-length-value" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d417eb548214fa822d93f84444024b4e57c13ed6719d4dcc68eec24fb481e9f5" -dependencies = [ - "bytemuck", - "num-derive", - "num-traits", - "solana-account-info", - "solana-decode-error", - "solana-msg", - "solana-program-error", - "spl-discriminator", - "spl-pod", - "thiserror 2.0.12", -] - -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "stream-cancel" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9fbf9bd71e4cf18d68a8a0951c0e5b7255920c0cd992c4ff51cddd6ef514a3" -dependencies = [ - "futures-core", - "pin-project", - "tokio", -] - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "strum" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.24.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "rustversion", - "syn 1.0.109", -] - -[[package]] -name = "subtle" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" - -[[package]] -name = "symlink" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7973cce6668464ea31f176d85b13c7ab3bba2cb3b77a2ed26abd7801688010a" - -[[package]] -name = "syn" -version = "1.0.109" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.96" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - -[[package]] -name = "sync_wrapper" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" -dependencies = [ - "futures-core", -] - -[[package]] -name = "synstructure" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", - "unicode-xid", -] - -[[package]] -name = "synstructure" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "sys-info" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b3a0d0aba8bf96a0e1ddfdc352fc53b3df7f39318c71854910c3c4b024ae52c" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "sysctl" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225e483f02d0ad107168dc57381a8a40c3aeea6abe47f37506931f861643cfa8" -dependencies = [ - "bitflags 1.2.1", - "byteorder", - "libc", - "thiserror 1.0.69", - "walkdir", -] - 
-[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.2.1", - "core-foundation 0.9.4", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "tar" -version = "0.4.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" -dependencies = [ - "filetime", - "libc", - "xattr", -] - -[[package]] -name = "tarpc" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" -dependencies = [ - "anyhow", - "fnv", - "futures 0.3.31", - "humantime", - "opentelemetry", - "pin-project", - "rand 0.8.5", - "serde", - "static_assertions", - "tarpc-plugins", - "thiserror 1.0.69", - "tokio", - "tokio-serde", - "tokio-util 0.6.10", - "tracing", - "tracing-opentelemetry", -] - -[[package]] -name = "tarpc-plugins" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "tempfile" -version = "3.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" -dependencies = [ - "fastrand", - "getrandom 0.3.1", - "once_cell", - "rustix 1.0.2", - "windows-sys 0.59.0", -] - -[[package]] -name = "termcolor" -version = "1.4.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "termtree" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width 0.1.14", -] - -[[package]] -name = "thiserror" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" -dependencies = [ - "thiserror-impl 1.0.69", -] - -[[package]] -name = "thiserror" -version = "2.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" -dependencies = [ - "thiserror-impl 2.0.12", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "thiserror-impl" -version = "2.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "thread_local" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" -dependencies = [ - "cfg-if 1.0.0", - "once_cell", -] - -[[package]] -name = "tikv-jemalloc-sys" -version = 
"0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "tikv-jemallocator" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865" -dependencies = [ - "libc", - "tikv-jemalloc-sys", -] - -[[package]] -name = "time" -version = "0.3.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" -dependencies = [ - "deranged", - "itoa", - "num-conv", - "powerfmt", - "serde", - "time-core", - "time-macros", -] - -[[package]] -name = "time-core" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" - -[[package]] -name = "time-macros" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" -dependencies = [ - "num-conv", - "time-core", -] - -[[package]] -name = "tiny-bip39" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" -dependencies = [ - "anyhow", - "hmac 0.8.1", - "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", - "rustc-hash 1.1.0", - "sha2 0.9.9", - "thiserror 1.0.69", - "unicode-normalization", - "wasm-bindgen", - "zeroize", -] - -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec", -] - -[[package]] -name = "tinyvec" -version = "1.8.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - -[[package]] -name = "tokio" -version = "1.47.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" -dependencies = [ - "backtrace", - "bytes", - "io-uring", - "libc", - "mio", - "parking_lot 0.12.3", - "pin-project-lite", - "signal-hook-registry", - "slab", - "socket2 0.6.0", - "tokio-macros", - "windows-sys 0.59.0", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-macros" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - -[[package]] -name = "tokio-rustls" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" -dependencies = [ - "rustls 0.23.31", - "tokio", -] - -[[package]] -name = "tokio-serde" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" -dependencies = [ - "bincode", - "bytes", - "educe", - "futures-core", - "futures-sink", - "pin-project", - "serde", - "serde_json", -] - -[[package]] -name = "tokio-stream" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" -dependencies = [ - "futures-core", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" -dependencies = [ - "futures-util", - "log", - "rustls 0.21.12", - "tokio", - "tokio-rustls 0.24.1", - "tungstenite", - "webpki-roots 0.25.4", -] - -[[package]] -name = "tokio-util" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "slab", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" -dependencies = [ - "bytes", - "futures-core", - "futures-io", - "futures-sink", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_datetime" -version 
= "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" - -[[package]] -name = "toml_edit" -version = "0.22.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" -dependencies = [ - "indexmap 2.10.0", - "toml_datetime", - "winnow", -] - -[[package]] -name = "tonic" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" -dependencies = [ - "async-stream", - "async-trait", - "axum", - "base64 0.21.7", - "bytes", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper-timeout", - "percent-encoding 2.3.1", - "pin-project", - "prost", - "rustls-pemfile", - "tokio", - "tokio-rustls 0.24.1", - "tokio-stream", - "tower 0.4.13", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tonic-build" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" -dependencies = [ - "prettyplease", - "proc-macro2", - "prost-build", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "indexmap 1.9.3", - "pin-project", - "pin-project-lite", - "rand 0.8.5", - "slab", - "tokio", - "tokio-util 0.7.16", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tower" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" -dependencies = [ - "futures-core", - 
"futures-util", - "pin-project-lite", - "sync_wrapper 1.0.2", - "tokio", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-http" -version = "0.6.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" -dependencies = [ - "bitflags 2.9.1", - "bytes", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", - "iri-string", - "pin-project-lite", - "tower 0.5.2", - "tower-layer", - "tower-service", -] - -[[package]] -name = "tower-layer" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" - -[[package]] -name = "tower-service" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" - -[[package]] -name = "tracing" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" -dependencies = [ - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "tracing-core" -version = "0.1.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" -dependencies = [ - "once_cell", - "valuable", -] - -[[package]] -name = "tracing-opentelemetry" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" -dependencies = [ - "once_cell", - "opentelemetry", - 
"tracing", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" -dependencies = [ - "sharded-slab", - "thread_local", - "tracing-core", -] - -[[package]] -name = "trait-set" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b79e2e9c9ab44c6d7c20d5976961b47e8f49ac199154daa514b77cd1ab536625" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "trees" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de5f738ceab88e2491a94ddc33c3feeadfa95fedc60363ef110845df12f3878" - -[[package]] -name = "try-lock" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" - -[[package]] -name = "tungstenite" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" -dependencies = [ - "byteorder", - "bytes", - "data-encoding", - "http 0.2.12", - "httparse", - "log", - "rand 0.8.5", - "rustls 0.21.12", - "sha1", - "thiserror 1.0.69", - "url 2.5.4", - "utf-8", - "webpki-roots 0.24.0", -] - -[[package]] -name = "typenum" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" - -[[package]] -name = "ucd-trie" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" - -[[package]] -name = "unicase" -version = "2.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" - -[[package]] -name = "unicode-bidi" -version = "0.3.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" - -[[package]] -name = "unicode-ident" -version = "1.0.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" - -[[package]] -name = "unicode-normalization" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - -[[package]] -name = "unicode-width" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" - -[[package]] -name = "unicode-width" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" - -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" - -[[package]] -name = "unit-prefix" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "323402cff2dd658f39ca17c789b502021b3f18707c91cdf22e3838e1b4023817" - -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - 
-[[package]] -name = "unreachable" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" -dependencies = [ - "void", -] - -[[package]] -name = "unsafe-libyaml" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" - -[[package]] -name = "untrusted" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" - -[[package]] -name = "unwrap_none" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "461d0c5956fcc728ecc03a3a961e4adc9a7975d86f6f8371389a289517c02ca9" - -[[package]] -name = "uriparse" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0200d0fc04d809396c2ad43f3c95da3582a2556eba8d453c1087f4120ee352ff" -dependencies = [ - "fnv", - "lazy_static", -] - -[[package]] -name = "url" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" -dependencies = [ - "idna 0.1.5", - "matches", - "percent-encoding 1.0.1", -] - -[[package]] -name = "url" -version = "2.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" -dependencies = [ - "form_urlencoded", - "idna 1.0.3", - "percent-encoding 2.3.1", -] - -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - -[[package]] -name = "valuable" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "vec_extract_if_polyfill" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40c9cb5fb67c2692310b6eb3fce7dd4b6e4c9a75be4f2f46b27f0b2b7799759c" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - -[[package]] -name = "void" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = 
"0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasi" -version = "0.13.3+wasi-0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" -dependencies = [ - "wit-bindgen-rt", -] - -[[package]] -name = "wasm-bindgen" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" -dependencies = [ - "cfg-if 1.0.0", - "once_cell", - "rustversion", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.96", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "once_cell", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "web-sys" -version = "0.3.77" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "web-time" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki-root-certs" -version = "0.26.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd5da49bdf1f30054cfe0b8ce2958b8fbeb67c4d82c8967a598af481bef255c" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "webpki-roots" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b291546d5d9d1eab74f069c77749f2cb8504a12caa20f0f2de93ddbf6f411888" -dependencies = [ - "rustls-webpki 0.101.7", -] - -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - -[[package]] -name = "webpki-roots" -version = "0.26.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "webpki-roots" -version = "1.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.44", -] - -[[package]] -name = "wide" -version = "0.7.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" -dependencies = [ - "bytemuck", - "safe_arch", -] - -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" -dependencies = [ - "windows-sys 0.59.0", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-core" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-link" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" - -[[package]] -name = "windows-sys" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" -dependencies = [ - "windows-targets 0.42.2", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.2", -] - -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ 
- "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", -] - -[[package]] -name = "windows-targets" -version = "0.53.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" -dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" - -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnu" -version 
= "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" - -[[package]] -name = "winnow" -version = "0.6.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad699df48212c6cc6eb4435f35500ac6fd3b9913324f938aea302022ce19d310" -dependencies = [ - "memchr", -] - -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if 1.0.0", - "windows-sys 0.48.0", -] - -[[package]] -name = "wit-bindgen-rt" -version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" -dependencies = [ - "bitflags 2.9.1", -] - -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - -[[package]] -name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - -[[package]] -name = "x509-parser" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ecbeb7b67ce215e40e3cc7f2ff902f94a223acf44995934763467e7b1febc8" -dependencies = [ - "asn1-rs", - "base64 0.13.1", - "data-encoding", - "der-parser", - "lazy_static", - "nom", - "oid-registry", - "rusticata-macros", - "thiserror 1.0.69", - "time", -] - -[[package]] -name = "xattr" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" -dependencies = [ - "libc", - "linux-raw-sys 0.4.15", - "rustix 0.38.44", -] - -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "yoke" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", - "synstructure 0.13.1", -] - -[[package]] -name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" -dependencies = [ - "zerocopy-derive 0.8.23", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "zerofrom" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = 
"zerofrom-derive" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", - "synstructure 0.13.1", -] - -[[package]] -name = "zeroize" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "zerovec" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.96", -] - -[[package]] -name = "zstd" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "7.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" -dependencies = [ - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" 
-dependencies = [ - "cc", - "pkg-config", -] - -[[patch.unused]] -name = "crossbeam-epoch" -version = "0.9.5" -source = "git+https://github.com/anza-xyz/crossbeam?rev=fd279d707025f0e60951e429bf778b4813d1b6bf#fd279d707025f0e60951e429bf778b4813d1b6bf" diff --git a/svm/examples/Cargo.toml b/svm/examples/Cargo.toml deleted file mode 100644 index 98bbc84b86d407..00000000000000 --- a/svm/examples/Cargo.toml +++ /dev/null @@ -1,82 +0,0 @@ -[workspace] -members = ["json-rpc/client", "json-rpc/server", "paytube"] - -resolver = "2" - -[workspace.package] -version = "3.0.0" -authors = ["Anza Maintainers "] -repository = "https://github.com/anza-xyz/agave" -homepage = "https://anza.xyz/" -license = "Apache-2.0" -edition = "2021" - -[workspace.dependencies] -agave-feature-set = { path = "../../feature-set" } -agave-reserved-account-keys = { path = "../../reserved-account-keys" } -agave-syscalls = { path = "../../syscalls" } -base64 = "0.22.1" -bincode = "1.3.3" -borsh = { version = "1.5.2", features = ["derive"] } -bs58 = { version = "0.5.1", default-features = false } -clap = "2.33.1" -crossbeam-channel = "0.5.13" -env_logger = "0.9.3" -home = "0.5" -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" -jsonrpc-http-server = "18.0.0" -log = "0.4.22" -serde = "1.0.214" -serde_json = "1.0.132" -solana-account = "2.2.1" -solana-account-decoder = { path = "../../account-decoder" } -solana-bpf-loader-program = { path = "../../programs/bpf_loader" } -solana-client = { path = "../../client" } -solana-clock = "2.2.1" -solana-commitment-config = "2.2.1" -solana-compute-budget = { path = "../../compute-budget" } -solana-compute-budget-interface = "2.2.2" -solana-epoch-schedule = "2.2.1" -solana-hash = "2.2.1" -solana-instruction = "2.2.1" -solana-keypair = "2.2.1" -solana-logger = "=2.3.1" -solana-message = "2.3.0" -solana-nonce = "2.2.1" -solana-perf = { path = "../../perf" } -solana-program-pack = "2.2.1" -solana-program-runtime = { path = 
"../../program-runtime" } -solana-pubkey = "2.3.0" -solana-rent = "2.2.1" -solana-rpc-client-api = { path = "../../rpc-client-api" } -solana-sdk-ids = "2.2.1" -solana-signature = "2.2.1" -solana-signer = "2.2.1" -solana-svm = { path = "../" } -solana-svm-callback = { path = "../../svm-callback" } -solana-svm-feature-set = { path = "../../svm-feature-set" } -solana-system-interface = "1.0" -solana-system-program = { path = "../../programs/system" } -solana-sysvar = "2.2.1" -solana-sysvar-id = "2.2.1" -solana-test-validator = { path = "../../test-validator" } -solana-transaction = "2.2.2" -solana-transaction-context = { path = "../../transaction-context" } -solana-transaction-error = "2.2.1" -solana-transaction-status = { path = "../../transaction-status" } -solana-validator-exit = "2.2.1" -solana-version = { path = "../../version" } -spl-associated-token-account-interface = "1.0.0" -spl-token-2022-interface = "1.0.0" -spl-token-interface = "1.0.0" -termcolor = "1.4.1" -thiserror = "1.0.68" -tokio = "1.29.1" -tokio-util = "0.7" -yaml-rust = "0.4" - -[patch.crates-io] -crossbeam-epoch = { git = "https://github.com/anza-xyz/crossbeam", rev = "fd279d707025f0e60951e429bf778b4813d1b6bf" } -solana-curve25519 = { path = "../../curves/curve25519" } diff --git a/svm/examples/json-rpc/README.md b/svm/examples/json-rpc/README.md deleted file mode 100644 index 9879fc3e077020..00000000000000 --- a/svm/examples/json-rpc/README.md +++ /dev/null @@ -1,31 +0,0 @@ -This is an example application using SVM to implement a tiny subset of -Solana RPC protocol for the purpose of simulating transaction -execution without having to use the entire Solana Runtime. 
- -The example consists of two host applications -- json-rpc-server -- the RPC server that accepts incoming RPC requests - and performs transaction simulation sending back the results, -- json-rpc-client -- the RPC client program that sends transactions to - json-rpc-server for simulation, - -and - -- json-rpc-program is the source code of on-chain program that is - executed in a transaction sent by json-rpc-client. - -To run the example, compile the json-rpc-program with `cargo -build-sbf` command. Using solana-test-validator create a ledger, or -use an existing one, and deploy the compiled program to store it in -the ledger. Using agave-ledger-tool dump ledger accounts to a file, -e.g. `accounts.out`. Now start the json-rpc-server, e.g. -``` -cargo run --manifest-path json-rpc-server/Cargo.toml -- -l test-ledger -a accounts.json -``` - -Finally, run the client program. -``` -cargo run --manifest-path json-rpc-client/Cargo.toml -- -C config.yml -k json-rpc-program/target/deploy/helloworld-keypair.json -u localhost -``` - -The client will communicate with the server and print the responses it -receives from the server. 
diff --git a/svm/examples/json-rpc/client/Cargo.toml b/svm/examples/json-rpc/client/Cargo.toml deleted file mode 100644 index 4a59f7ce81233b..00000000000000 --- a/svm/examples/json-rpc/client/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "json-rpc-client" -description = "Reference example using Solana SVM API for RPC API" -version = { workspace = true } -edition = { workspace = true } -publish = false - -[features] -dummy-for-ci-check = [] -frozen-abi = [] - -[dependencies] -borsh = { workspace = true } -clap = { workspace = true } -home = { workspace = true } -solana-client = { workspace = true } -solana-commitment-config = { workspace = true } -solana-instruction = { workspace = true } -solana-keypair = { workspace = true } -solana-message = { workspace = true } -solana-pubkey = { workspace = true } -solana-signer = { workspace = true } -solana-transaction = { workspace = true } -thiserror = { workspace = true } -yaml-rust = { workspace = true } diff --git a/svm/examples/json-rpc/client/src/client.rs b/svm/examples/json-rpc/client/src/client.rs deleted file mode 100644 index 9252fa8f481d05..00000000000000 --- a/svm/examples/json-rpc/client/src/client.rs +++ /dev/null @@ -1,77 +0,0 @@ -use { - crate::utils, - solana_client::rpc_client::RpcClient, - solana_commitment_config::CommitmentConfig, - solana_instruction::{AccountMeta, Instruction}, - solana_keypair::{read_keypair_file, Keypair}, - solana_message::Message, - solana_signer::Signer, - solana_transaction::Transaction, -}; - -/// Establishes a RPC connection with the Simulation server. -/// Information about the server is gleened from the config file `config.yml`. 
-pub fn establish_connection(url: &Option<&str>, config: &Option<&str>) -> utils::Result { - let rpc_url = match url { - Some(x) => { - if *x == "localhost" { - "http://localhost:8899".to_string() - } else { - String::from(*x) - } - } - None => utils::get_rpc_url(config)?, - }; - Ok(RpcClient::new_with_commitment( - rpc_url, - CommitmentConfig::confirmed(), - )) -} - -/// Loads keypair information from the file located at KEYPAIR_PATH -/// and then verifies that the loaded keypair information corresponds -/// to an executable account via CONNECTION. Failure to read the -/// keypair or the loaded keypair corresponding to an executable -/// account will result in an error being returned. -pub fn get_program(keypair_path: &str, connection: &RpcClient) -> utils::Result { - let program_keypair = read_keypair_file(keypair_path).map_err(|e| { - utils::Error::InvalidConfig(format!( - "failed to read program keypair file ({}): ({})", - keypair_path, e - )) - })?; - - let program_info = connection.get_account(&program_keypair.pubkey())?; - if !program_info.executable { - return Err(utils::Error::InvalidConfig(format!( - "program with keypair ({}) is not executable", - keypair_path - ))); - } - - Ok(program_keypair) -} - -pub fn say_hello(player: &Keypair, program: &Keypair, connection: &RpcClient) -> utils::Result<()> { - let greeting_pubkey = utils::get_greeting_public_key(&player.pubkey(), &program.pubkey())?; - println!("greeting pubkey {greeting_pubkey:?}"); - - // Submit an instruction to the chain which tells the program to - // run. We pass the account that we want the results to be stored - // in as one of the account arguments which the program will - // handle. 
- - let data = [1u8]; - let instruction = Instruction::new_with_bytes( - program.pubkey(), - &data, - vec![AccountMeta::new(greeting_pubkey, false)], - ); - let message = Message::new(&[instruction], Some(&player.pubkey())); - let transaction = Transaction::new(&[player], message, connection.get_latest_blockhash()?); - - let response = connection.simulate_transaction(&transaction)?; - println!("{:?}", response); - - Ok(()) -} diff --git a/svm/examples/json-rpc/client/src/main.rs b/svm/examples/json-rpc/client/src/main.rs deleted file mode 100644 index c27a903ab249bb..00000000000000 --- a/svm/examples/json-rpc/client/src/main.rs +++ /dev/null @@ -1,48 +0,0 @@ -use clap::{crate_description, crate_name, crate_version, App, Arg}; - -mod client; -mod utils; - -fn main() { - let version = crate_version!().to_string(); - let args = std::env::args().collect::>(); - let matches = App::new(crate_name!()) - .about(crate_description!()) - .version(version.as_str()) - .arg( - Arg::with_name("config") - .long("config") - .short("C") - .takes_value(true) - .value_name("CONFIG") - .help("Config filepath"), - ) - .arg( - Arg::with_name("keypair") - .long("keypair") - .short("k") - .takes_value(true) - .value_name("KEYPAIR") - .help("Filepath or URL to a keypair"), - ) - .arg( - Arg::with_name("url") - .long("url") - .short("u") - .takes_value(true) - .value_name("URL_OR_MONIKER") - .help("URL for JSON RPC Server"), - ) - .get_matches_from(args); - let config = matches.value_of("config"); - let keypair = matches.value_of("keypair").unwrap(); - let url = matches.value_of("url"); - let connection = client::establish_connection(&url, &config).unwrap(); - println!( - "Connected to Simulation server running version ({}).", - connection.get_version().unwrap() - ); - let player = utils::get_player(&config).unwrap(); - let program = client::get_program(keypair, &connection).unwrap(); - client::say_hello(&player, &program, &connection).unwrap(); -} diff --git 
a/svm/examples/json-rpc/client/src/utils.rs b/svm/examples/json-rpc/client/src/utils.rs deleted file mode 100644 index 73cb43456f8eff..00000000000000 --- a/svm/examples/json-rpc/client/src/utils.rs +++ /dev/null @@ -1,105 +0,0 @@ -use { - borsh::{BorshDeserialize, BorshSerialize}, - solana_keypair::{read_keypair_file, Keypair}, - solana_pubkey::Pubkey, - thiserror::Error, - yaml_rust::YamlLoader, -}; - -#[allow(clippy::large_enum_variant)] -#[derive(Error, Debug)] -pub enum Error { - #[error("failed to read solana config file: ({0})")] - ConfigRead(std::io::Error), - #[error("failed to parse solana config file: ({0})")] - ConfigParse(#[from] yaml_rust::ScanError), - #[error("invalid config: ({0})")] - InvalidConfig(String), - - #[error("solana client error: ({0})")] - Client(#[from] solana_client::client_error::ClientError), - - #[error("error in public key derivation: ({0})")] - KeyDerivation(#[from] solana_pubkey::PubkeyError), -} - -pub type Result = std::result::Result; - -/// The schema for greeting storage in greeting accounts. This is what -/// is serialized into the account and updated when hellos are sent. -#[derive(BorshSerialize, BorshDeserialize)] -struct GreetingSchema { - counter: u32, -} - -/// Parses and returns the Solana yaml config on the system. 
-pub fn get_config(config: &Option<&str>) -> Result { - let path = match config { - Some(path) => std::path::PathBuf::from(path), - None => match home::home_dir() { - Some(mut path) => { - path.push(".config/solana/cli/config.yml"); - path - } - None => { - return Err(Error::ConfigRead(std::io::Error::new( - std::io::ErrorKind::NotFound, - "failed to locate homedir and thus can not locate solana config", - ))); - } - }, - }; - let config = std::fs::read_to_string(path).map_err(Error::ConfigRead)?; - let mut config = YamlLoader::load_from_str(&config)?; - match config.len() { - 1 => Ok(config.remove(0)), - l => Err(Error::InvalidConfig(format!( - "expected one yaml document got ({})", - l - ))), - } -} - -/// Gets the RPC url for the cluster that this machine is configured -/// to communicate with. -pub fn get_rpc_url(config: &Option<&str>) -> Result { - let config = get_config(config)?; - match config["json_rpc_url"].as_str() { - Some(s) => Ok(s.to_string()), - None => Err(Error::InvalidConfig( - "missing `json_rpc_url` field".to_string(), - )), - } -} - -/// Gets the "player" or local solana wallet that has been configured -/// on the machine. -pub fn get_player(config: &Option<&str>) -> Result { - let config = get_config(config)?; - if let Some(path) = config["keypair_path"].as_str() { - read_keypair_file(path).map_err(|e| { - Error::InvalidConfig(format!("failed to read keypair file ({}): ({})", path, e)) - }) - } else { - Err(Error::InvalidConfig( - "missing `keypair_path` field".to_string(), - )) - } -} - -/// Gets the seed used to generate greeting accounts. If you'd like to -/// force this program to generate a new greeting account and thus -/// restart the counter you can change this value. -pub fn get_greeting_seed() -> &'static str { - "hello" -} - -/// Derives and returns the greeting account public key for a given -/// PLAYER, PROGRAM combination. 
-pub fn get_greeting_public_key(player: &Pubkey, program: &Pubkey) -> Result { - Ok(Pubkey::create_with_seed( - player, - get_greeting_seed(), - program, - )?) -} diff --git a/svm/examples/json-rpc/config.yml b/svm/examples/json-rpc/config.yml deleted file mode 100644 index a7e4a4b1226dba..00000000000000 --- a/svm/examples/json-rpc/config.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -json_rpc_url: http://127.0.0.1:8899 -websocket_url: '' -keypair_path: svm/examples/test.json -address_labels: - '11111111111111111111111111111111': System Program -commitment: confirmed diff --git a/svm/examples/json-rpc/program/Cargo.toml b/svm/examples/json-rpc/program/Cargo.toml deleted file mode 100644 index 469e603318dcff..00000000000000 --- a/svm/examples/json-rpc/program/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "json-rpc-example-program" -version = "3.0.0" -edition = "2021" - -[features] -# This was needed for ci -dummy-for-ci-check = [] -frozen-abi = [] - -[dependencies] -borsh = "0.9" -solana-account-info = "2.3.0" -solana-program-entrypoint = "2.3.0" -solana-msg = "2.2.1" -solana-pubkey = "2.4.0" - -[lib] -name = "program" -crate-type = ["cdylib", "lib"] - -[workspace] diff --git a/svm/examples/json-rpc/program/src/lib.rs b/svm/examples/json-rpc/program/src/lib.rs deleted file mode 100644 index b087968b704d62..00000000000000 --- a/svm/examples/json-rpc/program/src/lib.rs +++ /dev/null @@ -1,33 +0,0 @@ -use { - borsh::{BorshDeserialize, BorshSerialize}, - solana_account_info::{next_account_info, AccountInfo}, - solana_msg::msg, - solana_program_entrypoint::entrypoint, - solana_pubkey::Pubkey, -}; - -/// The type of state managed by this program. The type defined here -/// must match the `GreetingAccount` type defined by the client. -#[derive(BorshSerialize, BorshDeserialize, Debug)] -pub struct GreetingAccount { - /// The number of greetings that have been sent to this account. 
- pub counter: u32, -} - -entrypoint!(process_instruction); - -pub fn process_instruction( - program_id: &Pubkey, - accounts: &[AccountInfo], - _instruction_data: &[u8], -) -> solana_program_entrypoint::ProgramResult { - // Get the account that stores greeting count information. - let accounts_iter = &mut accounts.iter(); - let account = next_account_info(accounts_iter)?; - - msg!("account.owner"); - account.owner.log(); - msg!("program_id"); - program_id.log(); - Ok(()) -} diff --git a/svm/examples/json-rpc/server/Cargo.toml b/svm/examples/json-rpc/server/Cargo.toml deleted file mode 100644 index 0e3c44d53c2631..00000000000000 --- a/svm/examples/json-rpc/server/Cargo.toml +++ /dev/null @@ -1,60 +0,0 @@ -[package] -name = "json-rpc-server" -description = "Reference example using Solana SVM API for RPC API" -version = { workspace = true } -edition = { workspace = true } -publish = false - -[features] -dummy-for-ci-check = [] -frozen-abi = [] - -[dependencies] -agave-feature-set = { workspace = true } -agave-reserved-account-keys = { workspace = true } -agave-syscalls = { workspace = true } -base64 = { workspace = true } -bincode = { workspace = true } -bs58 = { workspace = true } -clap = { workspace = true } -crossbeam-channel = { workspace = true } -env_logger = { workspace = true } -jsonrpc-core = { workspace = true } -jsonrpc-core-client = { workspace = true } -jsonrpc-derive = { workspace = true } -jsonrpc-http-server = { workspace = true } -log = { workspace = true } -serde = { workspace = true } -serde_json = { workspace = true } -solana-account = { workspace = true } -solana-account-decoder = { workspace = true } -solana-bpf-loader-program = { workspace = true } -solana-clock = { workspace = true } -solana-commitment-config = { workspace = true } -solana-compute-budget = { workspace = true } -solana-compute-budget-interface = { workspace = true } -solana-hash = { workspace = true } -solana-message = { workspace = true } -solana-nonce = { workspace = true } 
-solana-perf = { workspace = true } -solana-program-runtime = { workspace = true } -solana-pubkey = { workspace = true } -solana-rent = { workspace = true } -solana-rpc-client-api = { workspace = true } -solana-sdk-ids = { workspace = true } -solana-signature = { workspace = true } -solana-svm = { workspace = true } -solana-svm-callback = { workspace = true } -solana-system-interface = { workspace = true } -solana-system-program = { workspace = true } -solana-sysvar = { workspace = true } -solana-sysvar-id = { workspace = true } -solana-transaction = { workspace = true } -solana-transaction-context = { workspace = true } -solana-transaction-error = { workspace = true } -solana-transaction-status = { workspace = true } -solana-validator-exit = { workspace = true } -solana-version = { workspace = true } -spl-token-2022-interface = { workspace = true } -tokio = { workspace = true, features = ["full"] } -tokio-util = { workspace = true, features = ["codec", "compat"] } diff --git a/svm/examples/json-rpc/server/src/main.rs b/svm/examples/json-rpc/server/src/main.rs deleted file mode 100644 index 93f18ebdde54b9..00000000000000 --- a/svm/examples/json-rpc/server/src/main.rs +++ /dev/null @@ -1,76 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] - -use { - clap::{value_t_or_exit, App, Arg}, - std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, - path::PathBuf, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - thread, - time::Duration, - }, -}; - -pub mod rpc_process; -pub mod rpc_service; -pub mod svm_bridge; - -fn main() { - env_logger::init(); - let matches = App::new("solana-json-rpc") - .version("0.1.0") - .author("Agave Team ") - .about("JSON-RPC Simulation server") - .arg( - Arg::with_name("accounts_path") - .short("a") - .long("accounts") - .value_name("FILE") - .takes_value(true) - .required(true) - .default_value("accounts.json") - .help("Use FILE as location of accounts.json"), - ) - .arg( - Arg::with_name("ledger_path") - .short("l") - .long("ledger") - 
.value_name("DIR") - .takes_value(true) - .required(true) - .default_value("test-ledger") - .help("Use DIR as ledger location"), - ) - .get_matches(); - - let accounts_path = PathBuf::from(value_t_or_exit!(matches, "accounts_path", String)); - let ledger_path = PathBuf::from(value_t_or_exit!(matches, "ledger_path", String)); - let rpc_addr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); - let rpc_port = 8899u16; - let rpc_addr = SocketAddr::new(rpc_addr, rpc_port); - - let config = rpc_process::JsonRpcConfig { - accounts_path, - ledger_path, - rpc_threads: 1, - rpc_niceness_adj: 0, - max_request_body_size: Some(8192), - }; - - let exit = Arc::new(AtomicBool::new(false)); - let validator_exit = rpc_process::create_exit(exit.clone()); - - let _rpc_service = - rpc_service::JsonRpcService::new(rpc_addr, config, validator_exit, exit.clone()); - - let refresh_interval = Duration::from_millis(250); - for _i in 0.. { - if exit.load(Ordering::Relaxed) { - break; - } - thread::sleep(refresh_interval); - } -} diff --git a/svm/examples/json-rpc/server/src/rpc_process.rs b/svm/examples/json-rpc/server/src/rpc_process.rs deleted file mode 100644 index 5ac6395f542eff..00000000000000 --- a/svm/examples/json-rpc/server/src/rpc_process.rs +++ /dev/null @@ -1,934 +0,0 @@ -use { - crate::svm_bridge::{ - create_executable_environment, LoadAndExecuteTransactionsOutput, MockBankCallback, - MockForkGraph, TransactionBatch, - }, - agave_reserved_account_keys::ReservedAccountKeys, - base64::{prelude::BASE64_STANDARD, Engine}, - bincode::config::Options, - jsonrpc_core::{types::error, Error, Metadata, Result}, - jsonrpc_derive::rpc, - log::*, - serde_json, - solana_account::{from_account, Account, AccountSharedData, ReadableAccount}, - solana_account_decoder::{ - encode_ui_account, - parse_account_data::{AccountAdditionalDataV3, SplTokenAdditionalDataV2}, - parse_token::{get_token_account_mint, is_known_spl_token_id}, - UiAccount, UiAccountEncoding, UiDataSliceConfig, MAX_BASE58_BYTES, - }, - 
solana_clock::{Slot, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY}, - solana_commitment_config::CommitmentConfig, - solana_hash::Hash, - solana_message::{ - inner_instruction::InnerInstructions, - v0::{LoadedAddresses, MessageAddressTableLookup}, - AddressLoader, AddressLoaderError, - }, - solana_nonce::state::DurableNonce, - solana_perf::packet::PACKET_DATA_SIZE, - solana_program_runtime::{ - execution_budget::SVMTransactionExecutionAndFeeBudgetLimits, - loaded_programs::ProgramCacheEntry, - }, - solana_pubkey::Pubkey, - solana_rent::Rent, - solana_rpc_client_api::{ - config::*, - response::{Response as RpcResponse, *}, - }, - solana_signature::Signature, - solana_svm::{ - account_loader::{CheckedTransactionDetails, TransactionCheckResult}, - account_overrides::AccountOverrides, - transaction_error_metrics::TransactionErrorMetrics, - transaction_processing_result::{ - ProcessedTransaction, TransactionProcessingResultExtensions, - }, - transaction_processor::{ - ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages, - TransactionProcessingConfig, TransactionProcessingEnvironment, - }, - }, - solana_system_program::system_processor, - solana_sysvar as sysvar, - solana_transaction::{ - sanitized::{MessageHash, SanitizedTransaction}, - versioned::VersionedTransaction, - }, - solana_transaction_context::{TransactionAccount, TransactionReturnData}, - solana_transaction_error::TransactionError, - solana_transaction_status::{ - map_inner_instructions, parse_ui_inner_instructions, TransactionBinaryEncoding, - UiTransactionEncoding, - }, - solana_validator_exit::Exit, - spl_token_2022_interface::{ - extension::{ - interest_bearing_mint::InterestBearingConfig, scaled_ui_amount::ScaledUiAmountConfig, - BaseStateWithExtensions, StateWithExtensions, - }, - state::Mint, - }, - std::{ - any::type_name, - cmp::min, - collections::{HashMap, HashSet}, - fs, - path::PathBuf, - str::FromStr, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, RwLock, - }, 
- }, -}; - -mod transaction { - pub use { - solana_transaction::sanitized::MAX_TX_ACCOUNT_LOCKS, - solana_transaction_error::TransactionResult as Result, - }; -} - -pub const MAX_REQUEST_BODY_SIZE: usize = 50 * (1 << 10); // 50kB - -const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot -const EXECUTION_EPOCH: u64 = 2; // The execution epoch must be greater than the deployment epoch -const MAX_BASE58_SIZE: usize = 1683; // Golden, bump if PACKET_DATA_SIZE changes -const MAX_BASE64_SIZE: usize = 1644; // Golden, bump if PACKET_DATA_SIZE changes - -fn new_response(slot: Slot, value: T) -> RpcResponse { - RpcResponse { - context: RpcResponseContext::new(slot), - value, - } -} - -#[derive(Debug, Default, Clone)] -pub struct JsonRpcConfig { - pub accounts_path: PathBuf, - pub ledger_path: PathBuf, - pub rpc_threads: usize, - pub rpc_niceness_adj: i8, - pub max_request_body_size: Option, -} - -#[derive(Clone)] -pub struct JsonRpcRequestProcessor { - account_map: Vec<(Pubkey, AccountSharedData)>, - #[allow(dead_code)] - exit: Arc>, - transaction_processor: Arc>>, -} - -struct TransactionSimulationResult { - pub result: transaction::Result<()>, - pub logs: TransactionLogMessages, - pub post_simulation_accounts: Vec, - pub units_consumed: u64, - pub loaded_accounts_data_size: u32, - pub return_data: Option, - pub inner_instructions: Option>, -} - -#[derive(Debug, Default, PartialEq)] -pub struct ProcessedTransactionCounts { - pub processed_transactions_count: u64, - pub processed_non_vote_transactions_count: u64, - pub processed_with_successful_result_count: u64, - pub signature_count: u64, -} - -#[derive(Debug, PartialEq, Eq)] -pub enum TransactionLogCollectorFilter { - All, - AllWithVotes, - None, - OnlyMentionedAddresses, -} - -impl Default for TransactionLogCollectorFilter { - fn default() -> Self { - Self::None - } -} - -#[derive(Debug, Default)] -pub struct TransactionLogCollectorConfig { - pub mentioned_addresses: HashSet, - pub 
filter: TransactionLogCollectorFilter, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct TransactionLogInfo { - pub signature: Signature, - pub result: transaction::Result<()>, - pub is_vote: bool, - pub log_messages: TransactionLogMessages, -} - -#[derive(Default, Debug)] -pub struct TransactionLogCollector { - // All the logs collected for from this Bank. Exact contents depend on the - // active `TransactionLogCollectorFilter` - pub logs: Vec, - - // For each `mentioned_addresses`, maintain a list of indices into `logs` to easily - // locate the logs from transactions that included the mentioned addresses. - pub mentioned_address_map: HashMap>, -} - -impl AddressLoader for JsonRpcRequestProcessor { - fn load_addresses( - self, - _lookups: &[MessageAddressTableLookup], - ) -> core::result::Result { - Ok(LoadedAddresses { - writable: vec![], - readonly: vec![], - }) - } -} - -impl Metadata for JsonRpcRequestProcessor {} - -impl JsonRpcRequestProcessor { - pub fn new(config: JsonRpcConfig, exit: Arc>) -> Self { - let accounts_json_path = config.accounts_path.clone(); - let accounts_data: String = fs::read_to_string(accounts_json_path).unwrap(); - let accounts_data: serde_json::Value = serde_json::from_str(&accounts_data).unwrap(); - let accounts_slice: Vec<(Pubkey, AccountSharedData)> = accounts_data["accounts"] - .as_array() - .unwrap() - .iter() - .map(|acc| { - let pubkey = Pubkey::from_str(acc["pubkey"].as_str().unwrap()).unwrap(); - let account = acc["account"].as_object().unwrap(); - let owner = account["owner"].as_str().unwrap(); - let data = account["data"].as_array().unwrap()[0].as_str().unwrap(); - let acc_data = AccountSharedData::from(Account { - lamports: account["lamports"].as_u64().unwrap(), - data: BASE64_STANDARD.decode(data).unwrap(), - owner: Pubkey::from_str(owner).unwrap(), - executable: account["executable"].as_bool().unwrap(), - rent_epoch: account["rentEpoch"].as_u64().unwrap(), - }); - (pubkey, acc_data) - }) - .collect(); - let 
batch_processor = TransactionBatchProcessor::::new_uninitialized( - EXECUTION_SLOT, - EXECUTION_EPOCH, - ); - - Self { - account_map: accounts_slice, - exit, - transaction_processor: Arc::new(RwLock::new(batch_processor)), - } - } - - fn get_account_info( - &self, - pubkey: &Pubkey, - config: Option, - ) -> Result>> { - let RpcAccountInfoConfig { - encoding, - data_slice, - commitment: _, - min_context_slot: _, - } = config.unwrap_or_default(); - let encoding = encoding.unwrap_or(UiAccountEncoding::Binary); - Ok(new_response( - 0, - match self.get_account(pubkey) { - Some(account) => { - debug!("Found account {pubkey:?}"); - Some(encode_account(&account, pubkey, encoding, data_slice)?) - } - None => { - debug!("Did not find account {pubkey:?}"); - None - } - }, - )) - } - - fn get_latest_blockhash(&self, _config: RpcContextConfig) -> Result> { - let blockhash = Hash::default(); - let last_valid_block_height = 0u64; - Ok(new_response( - 0, - RpcBlockhash { - blockhash: blockhash.to_string(), - last_valid_block_height, - }, - )) - } - - fn get_minimum_balance_for_rent_exemption( - &self, - _data_len: usize, - _commitment: Option, - ) -> u64 { - 0u64 - } - - fn simulate_transaction_unchecked( - &self, - transaction: &SanitizedTransaction, - enable_cpi_recording: bool, - ) -> TransactionSimulationResult { - let mut mock_bank = MockBankCallback::new(self.account_map.clone()); - let transaction_processor = self.transaction_processor.read().unwrap(); - - let account_keys = transaction.message().account_keys(); - let number_of_accounts = account_keys.len(); - let account_overrides = AccountOverrides::default(); - - let fork_graph = Arc::new(RwLock::new(MockForkGraph {})); - - create_executable_environment( - fork_graph.clone(), - &account_keys, - &mut mock_bank, - &transaction_processor, - ); - - // Add the system program builtin. 
- transaction_processor.add_builtin( - &mock_bank, - solana_system_program::id(), - "system_program", - ProgramCacheEntry::new_builtin( - 0, - b"system_program".len(), - system_processor::Entrypoint::vm, - ), - ); - // Add the BPF Loader v2 builtin, for the SPL Token program. - transaction_processor.add_builtin( - &mock_bank, - solana_sdk_ids::bpf_loader_upgradeable::id(), - "solana_bpf_loader_upgradeable_program", - ProgramCacheEntry::new_builtin( - 0, - b"solana_bpf_loader_upgradeable_program".len(), - solana_bpf_loader_program::Entrypoint::vm, - ), - ); - - let batch = self.prepare_unlocked_batch_from_single_tx(transaction); - let LoadAndExecuteTransactionsOutput { - mut processing_results, - .. - } = self.load_and_execute_transactions( - &mock_bank, - &batch, - // After simulation, transactions will need to be forwarded to the leader - // for processing. During forwarding, the transaction could expire if the - // delay is not accounted for. - MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY, - TransactionProcessingConfig { - account_overrides: Some(&account_overrides), - check_program_modification_slot: false, - log_messages_bytes_limit: None, - limit_to_load_programs: true, - recording_config: ExecutionRecordingConfig { - enable_cpi_recording, - enable_log_recording: true, - enable_return_data_recording: true, - enable_transaction_balance_recording: true, - }, - }, - ); - - let processing_result = processing_results - .pop() - .unwrap_or(Err(TransactionError::InvalidProgramForExecution)); - let flattened_result = processing_result.flattened_result(); - let (post_simulation_accounts, logs, return_data, inner_instructions) = - match processing_result { - Ok(processed_tx) => match processed_tx { - ProcessedTransaction::Executed(executed_tx) => { - let details = executed_tx.execution_details; - let post_simulation_accounts = executed_tx - .loaded_transaction - .accounts - .into_iter() - .take(number_of_accounts) - .collect::>(); - ( - 
post_simulation_accounts, - details.log_messages, - details.return_data, - details.inner_instructions, - ) - } - ProcessedTransaction::FeesOnly(_) => (vec![], None, None, None), - }, - Err(_) => (vec![], None, None, None), - }; - let logs = logs.unwrap_or_default(); - let units_consumed: u64 = 0; - let loaded_accounts_data_size: u32 = 0; - - TransactionSimulationResult { - result: flattened_result, - logs, - post_simulation_accounts, - units_consumed, - loaded_accounts_data_size, - return_data, - inner_instructions, - } - } - - fn prepare_unlocked_batch_from_single_tx<'a>( - &'a self, - transaction: &'a SanitizedTransaction, - ) -> TransactionBatch<'a> { - let tx_account_lock_limit = transaction::MAX_TX_ACCOUNT_LOCKS; - let lock_result = transaction - .get_account_locks(tx_account_lock_limit) - .map(|_| ()); - let batch = TransactionBatch::new( - vec![lock_result], - std::borrow::Cow::Borrowed(std::slice::from_ref(transaction)), - ); - batch - } - - fn check_age( - &self, - sanitized_txs: &[impl core::borrow::Borrow], - lock_results: &[transaction::Result<()>], - max_age: usize, - error_counters: &mut TransactionErrorMetrics, - ) -> Vec { - let last_blockhash = Hash::default(); - let next_durable_nonce = DurableNonce::from_blockhash(&last_blockhash); - - sanitized_txs - .iter() - .zip(lock_results) - .map(|(tx, lock_res)| match lock_res { - Ok(()) => self.check_transaction_age( - tx.borrow(), - max_age, - &next_durable_nonce, - error_counters, - ), - Err(e) => Err(e.clone()), - }) - .collect() - } - - fn check_transaction_age( - &self, - _tx: &SanitizedTransaction, - _max_age: usize, - _next_durable_nonce: &DurableNonce, - _error_counters: &mut TransactionErrorMetrics, - ) -> TransactionCheckResult { - /* for now just return defaults */ - Ok(CheckedTransactionDetails::new( - None, - Ok(SVMTransactionExecutionAndFeeBudgetLimits::default()), - )) - } - - fn clock(&self) -> sysvar::clock::Clock { - 
from_account(&self.get_account(&sysvar::clock::id()).unwrap_or_default()) - .unwrap_or_default() - } - - fn get_account(&self, pubkey: &Pubkey) -> Option { - let account_map: HashMap = - HashMap::from_iter(self.account_map.clone()); - account_map.get(pubkey).cloned() - } - - fn get_additional_mint_data(&self, data: &[u8]) -> Result { - StateWithExtensions::::unpack(data) - .map_err(|_| { - Error::invalid_params("Invalid param: Token mint could not be unpacked".to_string()) - }) - .map(|mint| { - let interest_bearing_config = mint - .get_extension::() - .map(|x| (*x, self.clock().unix_timestamp)) - .ok(); - let scaled_ui_amount_config = mint - .get_extension::() - .map(|x| (*x, self.clock().unix_timestamp)) - .ok(); - SplTokenAdditionalDataV2 { - decimals: mint.base.decimals, - interest_bearing_config, - scaled_ui_amount_config, - } - }) - } - - fn get_encoded_account( - &self, - pubkey: &Pubkey, - encoding: UiAccountEncoding, - data_slice: Option, - // only used for simulation results - overwrite_accounts: Option<&HashMap>, - ) -> Result> { - match overwrite_accounts - .and_then(|accounts| accounts.get(pubkey).cloned()) - .or_else(|| self.get_account(pubkey)) - { - Some(account) => { - let response = if is_known_spl_token_id(account.owner()) - && encoding == UiAccountEncoding::JsonParsed - { - self.get_parsed_token_account(pubkey, account, overwrite_accounts) - } else { - encode_account(&account, pubkey, encoding, data_slice)? 
- }; - Ok(Some(response)) - } - None => Ok(None), - } - } - - fn get_parsed_token_account( - &self, - pubkey: &Pubkey, - account: AccountSharedData, - // only used for simulation results - overwrite_accounts: Option<&HashMap>, - ) -> UiAccount { - let additional_data = get_token_account_mint(account.data()) - .and_then(|mint_pubkey| { - overwrite_accounts - .and_then(|accounts| accounts.get(&mint_pubkey).cloned()) - .or_else(|| self.get_account(&mint_pubkey)) - }) - .and_then(|mint_account| self.get_additional_mint_data(mint_account.data()).ok()) - .map(|data| AccountAdditionalDataV3 { - spl_token_additional_data: Some(data), - }); - - encode_ui_account( - pubkey, - &account, - UiAccountEncoding::JsonParsed, - additional_data, - None, - ) - } - - fn last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { - let last_hash = Hash::default(); - let last_lamports_per_signature = u64::default(); - (last_hash, last_lamports_per_signature) - } - - fn load_and_execute_transactions( - &self, - bank: &MockBankCallback, - batch: &TransactionBatch, - max_age: usize, - processing_config: TransactionProcessingConfig, - ) -> LoadAndExecuteTransactionsOutput { - let sanitized_txs = batch.sanitized_transactions(); - debug!("processing transactions: {}", sanitized_txs.len()); - let mut error_counters = TransactionErrorMetrics::default(); - - let check_results = self.check_age( - sanitized_txs, - batch.lock_results(), - max_age, - &mut error_counters, - ); - - let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature(); - let processing_environment = TransactionProcessingEnvironment { - blockhash, - blockhash_lamports_per_signature: lamports_per_signature, - epoch_total_stake: 0, - feature_set: bank.feature_set.runtime_features(), - rent: Rent::default(), - }; - - let sanitized_output = self - .transaction_processor - .read() - .unwrap() - .load_and_execute_sanitized_transactions( - bank, - sanitized_txs, - check_results, - 
&processing_environment, - &processing_config, - ); - - let err_count = &mut error_counters.total; - - let mut processed_counts = ProcessedTransactionCounts::default(); - for (processing_result, tx) in sanitized_output - .processing_results - .iter() - .zip(sanitized_txs) - { - if processing_result.was_processed() { - // Signature count must be accumulated only if the transaction - // is processed, otherwise a mismatched count between banking - // and replay could occur - processed_counts.signature_count += - u64::from(tx.message().header().num_required_signatures); - processed_counts.processed_transactions_count += 1; - - if !tx.is_simple_vote_transaction() { - processed_counts.processed_non_vote_transactions_count += 1; - } - } - - match processing_result.flattened_result() { - Ok(()) => { - processed_counts.processed_with_successful_result_count += 1; - } - Err(err) => { - if err_count.0 == 0 { - debug!("tx error: {:?} {:?}", err, tx); - } - *err_count += 1; - } - } - } - - LoadAndExecuteTransactionsOutput { - processing_results: sanitized_output.processing_results, - } - } -} - -/// RPC interface that an API node is expected to provide -pub mod rpc { - use super::*; - #[rpc] - pub trait Rpc { - type Metadata; - - #[rpc(meta, name = "getAccountInfo")] - fn get_account_info( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result>>; - - #[rpc(meta, name = "getLatestBlockhash")] - fn get_latest_blockhash( - &self, - meta: Self::Metadata, - config: Option, - ) -> Result>; - - #[rpc(meta, name = "getMinimumBalanceForRentExemption")] - fn get_minimum_balance_for_rent_exemption( - &self, - meta: Self::Metadata, - data_len: usize, - commitment: Option, - ) -> Result; - - #[rpc(meta, name = "getVersion")] - fn get_version(&self, meta: Self::Metadata) -> Result; - - #[rpc(meta, name = "simulateTransaction")] - fn simulate_transaction( - &self, - meta: Self::Metadata, - data: String, - config: Option, - ) -> Result>; - } - - pub struct 
RpcImpl; - - impl Rpc for RpcImpl { - type Metadata = JsonRpcRequestProcessor; - - fn simulate_transaction( - &self, - meta: Self::Metadata, - data: String, - config: Option, - ) -> Result> { - debug!("simulate_transaction rpc request received"); - - let RpcSimulateTransactionConfig { - sig_verify: _, - replace_recent_blockhash: _, - commitment: _, - encoding, - accounts: config_accounts, - min_context_slot: _, - inner_instructions: enable_cpi_recording, - } = config.unwrap_or_default(); - let tx_encoding = encoding.unwrap_or(UiTransactionEncoding::Base58); - let binary_encoding = tx_encoding.into_binary_encoding().ok_or_else(|| { - Error::invalid_params(format!( - "unsupported encoding: {tx_encoding}. Supported encodings: base58, base64" - )) - })?; - let (_, unsanitized_tx) = - decode_and_deserialize::(data, binary_encoding)?; - debug!("unsanitized transaction decoded {:?}", unsanitized_tx); - - let transaction = sanitize_transaction( - unsanitized_tx, - meta.clone(), - &ReservedAccountKeys::default().active, - )?; - - let TransactionSimulationResult { - result, - logs, - post_simulation_accounts, - units_consumed, - loaded_accounts_data_size, - return_data, - inner_instructions, - } = meta.simulate_transaction_unchecked(&transaction, enable_cpi_recording); - - let account_keys = transaction.message().account_keys(); - let number_of_accounts = account_keys.len(); - - let accounts = if let Some(config_accounts) = config_accounts { - let accounts_encoding = config_accounts - .encoding - .unwrap_or(UiAccountEncoding::Base64); - - if accounts_encoding == UiAccountEncoding::Binary - || accounts_encoding == UiAccountEncoding::Base58 - { - return Err(Error::invalid_params("base58 encoding not supported")); - } - - if config_accounts.addresses.len() > number_of_accounts { - return Err(Error::invalid_params(format!( - "Too many accounts provided; max {number_of_accounts}" - ))); - } - - if result.is_err() { - Some(vec![None; config_accounts.addresses.len()]) - } else { - 
let mut post_simulation_accounts_map = HashMap::new(); - for (pubkey, data) in post_simulation_accounts { - post_simulation_accounts_map.insert(pubkey, data); - } - - Some( - config_accounts - .addresses - .iter() - .map(|address_str| { - let pubkey = verify_pubkey(address_str)?; - meta.get_encoded_account( - &pubkey, - accounts_encoding, - None, - Some(&post_simulation_accounts_map), - ) - }) - .collect::>>()?, - ) - } - } else { - None - }; - - let inner_instructions = inner_instructions.map(|info| { - map_inner_instructions(info) - .map(|converted| parse_ui_inner_instructions(converted, &account_keys)) - .collect() - }); - - Ok(new_response( - 0, - RpcSimulateTransactionResult { - err: result.err().map(Into::into), - logs: Some(logs), - accounts, - units_consumed: Some(units_consumed), - loaded_accounts_data_size: Some(loaded_accounts_data_size), - return_data: return_data.map(|return_data| return_data.into()), - inner_instructions, - replacement_blockhash: None, - }, - )) - } - - fn get_account_info( - &self, - meta: Self::Metadata, - pubkey_str: String, - config: Option, - ) -> Result>> { - debug!("get_account_info rpc request received: {:?}", pubkey_str); - let pubkey = verify_pubkey(&pubkey_str)?; - debug!("pubkey {pubkey:?} verified."); - meta.get_account_info(&pubkey, config) - } - - fn get_latest_blockhash( - &self, - meta: Self::Metadata, - config: Option, - ) -> Result> { - debug!("get_latest_blockhash rpc request received"); - meta.get_latest_blockhash(config.unwrap_or_default()) - } - - fn get_minimum_balance_for_rent_exemption( - &self, - meta: Self::Metadata, - data_len: usize, - commitment: Option, - ) -> Result { - debug!( - "get_minimum_balance_for_rent_exemption rpc request received: {:?}", - data_len - ); - if data_len as u64 > solana_system_interface::MAX_PERMITTED_DATA_LENGTH { - return Err(Error::invalid_request()); - } - Ok(meta.get_minimum_balance_for_rent_exemption(data_len, commitment)) - } - - fn get_version(&self, _: Self::Metadata) -> 
Result { - debug!("get_version rpc request received"); - let version = solana_version::Version::default(); - Ok(RpcVersionInfo { - solana_core: version.to_string(), - feature_set: Some(version.feature_set), - }) - } - } -} - -pub fn create_exit(exit: Arc) -> Arc> { - let mut exit_handler = Exit::default(); - exit_handler.register_exit(Box::new(move || exit.store(true, Ordering::Relaxed))); - Arc::new(RwLock::new(exit_handler)) -} - -fn decode_and_deserialize( - encoded: String, - encoding: TransactionBinaryEncoding, -) -> Result<(Vec, T)> -where - T: serde::de::DeserializeOwned, -{ - let wire_output = match encoding { - TransactionBinaryEncoding::Base58 => { - if encoded.len() > MAX_BASE58_SIZE { - return Err(Error::invalid_params(format!( - "base58 encoded {} too large: {} bytes (max: encoded/raw {}/{})", - type_name::(), - encoded.len(), - MAX_BASE58_SIZE, - PACKET_DATA_SIZE, - ))); - } - bs58::decode(encoded) - .into_vec() - .map_err(|e| Error::invalid_params(format!("invalid base58 encoding: {e:?}")))? - } - TransactionBinaryEncoding::Base64 => { - if encoded.len() > MAX_BASE64_SIZE { - return Err(Error::invalid_params(format!( - "base64 encoded {} too large: {} bytes (max: encoded/raw {}/{})", - type_name::(), - encoded.len(), - MAX_BASE64_SIZE, - PACKET_DATA_SIZE, - ))); - } - BASE64_STANDARD - .decode(encoded) - .map_err(|e| Error::invalid_params(format!("invalid base64 encoding: {e:?}")))? 
- } - }; - if wire_output.len() > PACKET_DATA_SIZE { - return Err(Error::invalid_params(format!( - "decoded {} too large: {} bytes (max: {} bytes)", - type_name::(), - wire_output.len(), - PACKET_DATA_SIZE - ))); - } - bincode::options() - .with_limit(PACKET_DATA_SIZE as u64) - .with_fixint_encoding() - .allow_trailing_bytes() - .deserialize_from(&wire_output[..]) - .map_err(|err| { - Error::invalid_params(format!( - "failed to deserialize {}: {}", - type_name::(), - &err.to_string() - )) - }) - .map(|output| (wire_output, output)) -} - -fn encode_account( - account: &T, - pubkey: &Pubkey, - encoding: UiAccountEncoding, - data_slice: Option, -) -> Result { - if (encoding == UiAccountEncoding::Binary || encoding == UiAccountEncoding::Base58) - && data_slice - .map(|s| min(s.length, account.data().len().saturating_sub(s.offset))) - .unwrap_or(account.data().len()) - > MAX_BASE58_BYTES - { - let message = format!( - "Encoded binary (base 58) data should be less than {MAX_BASE58_BYTES} bytes, please \ - use Base64 encoding." 
- ); - Err(error::Error { - code: error::ErrorCode::InvalidRequest, - message, - data: None, - }) - } else { - Ok(encode_ui_account( - pubkey, account, encoding, None, data_slice, - )) - } -} - -fn sanitize_transaction( - transaction: VersionedTransaction, - address_loader: impl AddressLoader, - reserved_account_keys: &HashSet, -) -> Result { - SanitizedTransaction::try_create( - transaction, - MessageHash::Compute, - None, - address_loader, - reserved_account_keys, - ) - .map_err(|err| Error::invalid_params(format!("invalid transaction: {err}"))) -} - -fn verify_pubkey(input: &str) -> Result { - input - .parse() - .map_err(|e| Error::invalid_params(format!("Invalid param: {e:?}"))) -} diff --git a/svm/examples/json-rpc/server/src/rpc_service.rs b/svm/examples/json-rpc/server/src/rpc_service.rs deleted file mode 100644 index 45165f5d3e8f15..00000000000000 --- a/svm/examples/json-rpc/server/src/rpc_service.rs +++ /dev/null @@ -1,111 +0,0 @@ -use { - crate::rpc_process::{rpc::*, *}, - crossbeam_channel::unbounded, - jsonrpc_core::MetaIoHandler, - jsonrpc_http_server::{ - hyper, AccessControlAllowOrigin, CloseHandle, DomainsValidation, ServerBuilder, - }, - log::*, - solana_perf::thread::renice_this_thread, - solana_validator_exit::Exit, - std::{ - net::SocketAddr, - sync::{atomic::AtomicBool, Arc, RwLock}, - thread::{self, Builder, JoinHandle}, - }, -}; - -pub struct JsonRpcService { - thread_hdl: JoinHandle<()>, - close_handle: Option, -} - -impl JsonRpcService { - #[allow(clippy::too_many_arguments)] - pub fn new( - rpc_addr: SocketAddr, - config: JsonRpcConfig, - validator_exit: Arc>, - _exit: Arc, - ) -> Result { - info!("rpc bound to {:?}", rpc_addr); - info!("rpc configuration: {:?}", config); - let rpc_threads = 1.max(config.rpc_threads); - let rpc_niceness_adj = config.rpc_niceness_adj; - - let runtime = Arc::new( - tokio::runtime::Builder::new_multi_thread() - .worker_threads(rpc_threads) - .on_thread_start(move || 
renice_this_thread(rpc_niceness_adj).unwrap()) - .thread_name("solRpcEl") - .enable_all() - .build() - .expect("Runtime"), - ); - - let max_request_body_size = config - .max_request_body_size - .unwrap_or(MAX_REQUEST_BODY_SIZE); - let request_processor = JsonRpcRequestProcessor::new(config, validator_exit.clone()); - let (close_handle_sender, close_handle_receiver) = unbounded(); - let thread_hdl = Builder::new() - .name("solJsonRpcSvc".to_string()) - .spawn(move || { - renice_this_thread(rpc_niceness_adj).unwrap(); - let mut io = MetaIoHandler::default(); - io.extend_with(rpc::RpcImpl.to_delegate()); - let server = ServerBuilder::with_meta_extractor( - io, - move |_req: &hyper::Request| request_processor.clone(), - ) - .event_loop_executor(runtime.handle().clone()) - .threads(1) - .cors(DomainsValidation::AllowOnly(vec![ - AccessControlAllowOrigin::Any, - ])) - .cors_max_age(86400) - .max_request_body_size(max_request_body_size) - .start_http(&rpc_addr); - - if let Err(e) = server { - warn!( - "JSON RPC service unavailable error: {:?}. 
\nAlso, check that port {} is \ - not already in use by another application", - e, - rpc_addr.port() - ); - close_handle_sender.send(Err(e.to_string())).unwrap(); - return; - } - - let server = server.unwrap(); - close_handle_sender.send(Ok(server.close_handle())).unwrap(); - server.wait(); - }) - .unwrap(); - - let close_handle = close_handle_receiver.recv().unwrap()?; - let close_handle_ = close_handle.clone(); - validator_exit - .write() - .unwrap() - .register_exit(Box::new(move || { - close_handle_.close(); - })); - Ok(Self { - thread_hdl, - close_handle: Some(close_handle), - }) - } - - pub fn exit(&mut self) { - if let Some(c) = self.close_handle.take() { - c.close() - } - } - - pub fn join(mut self) -> thread::Result<()> { - self.exit(); - self.thread_hdl.join() - } -} diff --git a/svm/examples/json-rpc/server/src/svm_bridge.rs b/svm/examples/json-rpc/server/src/svm_bridge.rs deleted file mode 100644 index 12fc4fd3a25807..00000000000000 --- a/svm/examples/json-rpc/server/src/svm_bridge.rs +++ /dev/null @@ -1,276 +0,0 @@ -use { - agave_feature_set::FeatureSet, - agave_syscalls::{ - SyscallAbort, SyscallGetClockSysvar, SyscallInvokeSignedRust, SyscallLog, - SyscallLogBpfComputeUnits, SyscallLogPubkey, SyscallLogU64, SyscallMemcpy, SyscallMemset, - SyscallSetReturnData, - }, - log::*, - solana_account::{Account, AccountSharedData, ReadableAccount}, - solana_clock::{Clock, Slot, UnixTimestamp}, - solana_compute_budget::compute_budget::ComputeBudget, - solana_message::AccountKeys, - solana_program_runtime::{ - invoke_context::InvokeContext, - loaded_programs::{ - BlockRelation, ForkGraph, LoadProgramMetrics, ProgramCacheEntry, - ProgramRuntimeEnvironments, - }, - solana_sbpf::{ - program::{BuiltinProgram, SBPFVersion}, - vm::Config, - }, - }, - solana_pubkey::Pubkey, - solana_svm::{ - transaction_processing_result::TransactionProcessingResult, - transaction_processor::TransactionBatchProcessor, - }, - solana_svm_callback::{InvokeContextCallback, 
TransactionProcessingCallback}, - solana_sysvar_id::SysvarId, - solana_transaction::sanitized::SanitizedTransaction, - std::{ - collections::HashMap, - sync::{Arc, RwLock}, - time::{SystemTime, UNIX_EPOCH}, - }, -}; - -mod transaction { - pub use solana_transaction_error::TransactionResult as Result; -} - -const DEPLOYMENT_SLOT: u64 = 0; -const DEPLOYMENT_EPOCH: u64 = 0; - -pub struct MockForkGraph {} - -impl ForkGraph for MockForkGraph { - fn relationship(&self, a: Slot, b: Slot) -> BlockRelation { - match a.cmp(&b) { - std::cmp::Ordering::Less => BlockRelation::Ancestor, - std::cmp::Ordering::Equal => BlockRelation::Equal, - std::cmp::Ordering::Greater => BlockRelation::Descendant, - } - } -} - -pub struct MockBankCallback { - pub feature_set: Arc, - pub account_shared_data: RwLock>, -} - -impl InvokeContextCallback for MockBankCallback {} - -impl TransactionProcessingCallback for MockBankCallback { - fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { - if let Some(data) = self.account_shared_data.read().unwrap().get(account) { - if data.lamports() == 0 { - None - } else { - owners.iter().position(|entry| data.owner() == entry) - } - } else { - None - } - } - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { - debug!( - "Get account {pubkey} shared data, thread {:?}", - std::thread::current().name() - ); - self.account_shared_data - .read() - .unwrap() - .get(pubkey) - .cloned() - } - - fn add_builtin_account(&self, name: &str, program_id: &Pubkey) { - let account_data = AccountSharedData::from(Account { - lamports: 5000, - data: name.as_bytes().to_vec(), - owner: solana_sdk_ids::native_loader::id(), - executable: true, - rent_epoch: 0, - }); - - self.account_shared_data - .write() - .unwrap() - .insert(*program_id, account_data); - } -} - -impl MockBankCallback { - pub fn new(account_map: Vec<(Pubkey, AccountSharedData)>) -> Self { - Self { - feature_set: Arc::new(FeatureSet::default()), - account_shared_data: 
RwLock::new(HashMap::from_iter(account_map)), - } - } - - #[allow(dead_code)] - pub fn override_feature_set(&mut self, new_set: FeatureSet) { - self.feature_set = Arc::new(new_set) - } -} - -pub struct LoadAndExecuteTransactionsOutput { - // Vector of results indicating whether a transaction was executed or could not - // be executed. Note executed transactions can still have failed! - pub processing_results: Vec, -} - -pub struct TransactionBatch<'a> { - lock_results: Vec>, - sanitized_txs: std::borrow::Cow<'a, [SanitizedTransaction]>, -} - -impl<'a> TransactionBatch<'a> { - pub fn new( - lock_results: Vec>, - sanitized_txs: std::borrow::Cow<'a, [SanitizedTransaction]>, - ) -> Self { - assert_eq!(lock_results.len(), sanitized_txs.len()); - Self { - lock_results, - sanitized_txs, - } - } - - pub fn lock_results(&self) -> &Vec> { - &self.lock_results - } - - pub fn sanitized_transactions(&self) -> &[SanitizedTransaction] { - &self.sanitized_txs - } -} - -pub fn create_custom_environment<'a>() -> BuiltinProgram> { - let compute_budget = ComputeBudget::new_with_defaults(/* simd_0296_active */ false); - let vm_config = Config { - max_call_depth: compute_budget.max_call_depth, - stack_frame_size: compute_budget.stack_frame_size, - enable_address_translation: true, - enable_stack_frame_gaps: true, - instruction_meter_checkpoint_distance: 10000, - enable_instruction_meter: true, - enable_instruction_tracing: true, - enable_symbol_and_section_labels: true, - reject_broken_elfs: true, - noop_instruction_rate: 256, - sanitize_user_provided_values: true, - enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V3, - optimize_rodata: false, - aligned_memory_mapping: true, - }; - - // Register system calls that the compiled contract calls during execution. 
- let mut loader = BuiltinProgram::new_loader(vm_config); - loader - .register_function("abort", SyscallAbort::vm) - .expect("Registration failed"); - loader - .register_function("sol_log_", SyscallLog::vm) - .expect("Registration failed"); - loader - .register_function("sol_log_64_", SyscallLogU64::vm) - .expect("Registration failed"); - loader - .register_function("sol_log_compute_units_", SyscallLogBpfComputeUnits::vm) - .expect("Registration failed"); - loader - .register_function("sol_log_pubkey", SyscallLogPubkey::vm) - .expect("Registration failed"); - loader - .register_function("sol_memcpy_", SyscallMemcpy::vm) - .expect("Registration failed"); - loader - .register_function("sol_memset_", SyscallMemset::vm) - .expect("Registration failed"); - loader - .register_function("sol_invoke_signed_rust", SyscallInvokeSignedRust::vm) - .expect("Registration failed"); - loader - .register_function("sol_set_return_data", SyscallSetReturnData::vm) - .expect("Registration failed"); - loader - .register_function("sol_get_clock_sysvar", SyscallGetClockSysvar::vm) - .expect("Registration failed"); - loader -} - -pub fn create_executable_environment( - fork_graph: Arc>, - account_keys: &AccountKeys, - mock_bank: &mut MockBankCallback, - transaction_processor: &TransactionBatchProcessor, -) { - let mut program_cache = transaction_processor.program_cache.write().unwrap(); - - program_cache.environments = ProgramRuntimeEnvironments { - program_runtime_v1: Arc::new(create_custom_environment()), - // We are not using program runtime v2 - program_runtime_v2: Arc::new(BuiltinProgram::new_loader(Config::default())), - }; - - program_cache.fork_graph = Some(Arc::downgrade(&fork_graph)); - // add programs to cache - for key in account_keys.iter() { - if let Some(account) = mock_bank.get_account_shared_data(key) { - if account.executable() - && *account.owner() == solana_sdk_ids::bpf_loader_upgradeable::id() - { - let data = account.data(); - let program_data_account_key = 
Pubkey::try_from(data[4..].to_vec()).unwrap(); - let program_data_account = mock_bank - .get_account_shared_data(&program_data_account_key) - .unwrap(); - let program_data = program_data_account.data(); - let elf_bytes = program_data[45..].to_vec(); - - let program_runtime_environment = - program_cache.environments.program_runtime_v1.clone(); - program_cache.assign_program( - *key, - Arc::new( - ProgramCacheEntry::new( - &solana_sdk_ids::bpf_loader_upgradeable::id(), - program_runtime_environment, - 0, - 0, - &elf_bytes, - elf_bytes.len(), - &mut LoadProgramMetrics::default(), - ) - .unwrap(), - ), - ); - } - } - } - - // We must fill in the sysvar cache entries - let time_now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backwards") - .as_secs() as i64; - let clock = Clock { - slot: DEPLOYMENT_SLOT, - epoch_start_timestamp: time_now.saturating_sub(10) as UnixTimestamp, - epoch: DEPLOYMENT_EPOCH, - leader_schedule_epoch: DEPLOYMENT_EPOCH, - unix_timestamp: time_now as UnixTimestamp, - }; - - let mut account_data = AccountSharedData::default(); - account_data.set_data_from_slice(bincode::serialize(&clock).unwrap().as_slice()); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(Clock::id(), account_data); -} diff --git a/svm/examples/json-rpc/test.json b/svm/examples/json-rpc/test.json deleted file mode 100644 index 2fd43ae43866aa..00000000000000 --- a/svm/examples/json-rpc/test.json +++ /dev/null @@ -1 +0,0 @@ -[39,82,169,128,159,226,211,180,118,92,132,200,38,92,230,90,221,95,252,83,174,5,205,251,125,219,15,82,119,57,3,125,134,169,60,216,172,10,24,129,71,172,121,154,5,13,100,84,126,135,69,153,3,163,184,126,153,0,99,201,89,63,43,24] \ No newline at end of file diff --git a/svm/examples/paytube/Cargo.toml b/svm/examples/paytube/Cargo.toml deleted file mode 100644 index 7b093a4c8414e8..00000000000000 --- a/svm/examples/paytube/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -name = "solana-svm-example-paytube" -description 
= "Reference example using Solana SVM API" -version = { workspace = true } -edition = { workspace = true } -publish = false - -[features] -dummy-for-ci-check = [] -frozen-abi = [] - -[dependencies] -agave-feature-set = { workspace = true } -agave-syscalls = { workspace = true } -solana-account = { workspace = true } -solana-bpf-loader-program = { workspace = true } -solana-client = { workspace = true } -solana-clock = { workspace = true } -solana-commitment-config = { workspace = true } -solana-compute-budget = { workspace = true } -solana-epoch-schedule = { workspace = true } -solana-fee-structure = "=2.3.0" -solana-hash = { workspace = true } -solana-instruction = { workspace = true } -solana-keypair = { workspace = true } -solana-logger = { workspace = true } -solana-program-pack = { workspace = true } -solana-program-runtime = { workspace = true, features = ["dev-context-only-utils"] } -solana-pubkey = { workspace = true } -solana-rent = { workspace = true } -solana-sdk-ids = { workspace = true } -solana-signer = { workspace = true } -solana-svm = { workspace = true } -solana-svm-callback = { workspace = true } -solana-svm-feature-set = { workspace = true } -solana-system-interface = { workspace = true } -solana-system-program = { workspace = true } -solana-transaction = { workspace = true, features = ["blake3"] } -solana-transaction-error = { workspace = true } -spl-associated-token-account-interface = { workspace = true } -spl-token-interface = { workspace = true } -termcolor = { workspace = true } - -[dev-dependencies] -solana-test-validator = { workspace = true } diff --git a/svm/examples/paytube/README.md b/svm/examples/paytube/README.md deleted file mode 100644 index 400b1903ee1302..00000000000000 --- a/svm/examples/paytube/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# PayTube - -A reference implementation of an off-chain [state channel](https://ethereum.org/en/developers/docs/scaling/state-channels/) -built using [Anza's SVM 
API](https://www.anza.xyz/blog/anzas-new-svm-api). - -With the release of Agave 2.0, we've decoupled the SVM API from the rest of the -runtime, which means it can be used outside the validator. This unlocks -SVM-based solutions such as sidecars, channels, rollups, and more. This project -demonstrates everything you need to know about bootstrapping with this new API. - -PayTube is a state channel (more specifically a payment channel), designed to -allow multiple parties to transact amongst each other in SOL or SPL tokens -off-chain. When the channel is closed, the resulting changes in each user's -balances are posted to the base chain (Solana). - -Although this project is for demonstration purposes, a payment channel similar -to PayTube could be created that scales to handle massive bandwidth of -transfers, saving the overhead of posting transactions to the chain for last. diff --git a/svm/examples/paytube/src/lib.rs b/svm/examples/paytube/src/lib.rs deleted file mode 100644 index 5caffc6efcb83e..00000000000000 --- a/svm/examples/paytube/src/lib.rs +++ /dev/null @@ -1,197 +0,0 @@ -//! PayTube. A simple SPL payment channel. -//! -//! PayTube is an SVM-based payment channel that allows two parties to exchange -//! tokens off-chain. The channel is opened by invoking the PayTube "VM", -//! running on some arbitrary server(s). When transacting has concluded, the -//! channel is closed by submitting the final payment ledger to Solana. -//! -//! The final ledger tracks debits and credits to all registered token accounts -//! or system accounts (native SOL) during the lifetime of a channel. It is -//! then used to to craft a batch of transactions to submit to the settlement -//! chain (Solana). -//! -//! Users opt-in to using a PayTube channel by "registering" their token -//! accounts to the channel. This is done by delegating a token account to the -//! PayTube on-chain program on Solana. This delegation is temporary, and -//! 
released immediately after channel settlement. -//! -//! Note: This opt-in solution is for demonstration purposes only. -//! -//! ```text -//! -//! PayTube "VM" -//! -//! Bob Alice Bob Alice Will -//! | | | | | -//! | --o--o--o-> | | --o--o--o-> | | -//! | | | | --o--o--o-> | <--- PayTube -//! | <-o--o--o-- | | <-o--o--o-- | | Transactions -//! | | | | | -//! | --o--o--o-> | | -----o--o--o-----> | -//! | | | | -//! | --o--o--o-> | | <----o--o--o------ | -//! -//! \ / \ | / -//! -//! ------ ------ -//! Alice: x Alice: x -//! Bob: x Bob: x <--- Solana Transaction -//! Will: x with final ledgers -//! ------ ------ -//! -//! \\ \\ -//! x x -//! -//! Solana Solana <--- Settled to Solana -//! ``` -//! -//! The Solana SVM's `TransactionBatchProcessor` requires projects to provide a -//! "loader" plugin, which implements the `TransactionProcessingCallback` -//! interface. -//! -//! PayTube defines a `PayTubeAccountLoader` that implements the -//! `TransactionProcessingCallback` interface, and provides it to the -//! `TransactionBatchProcessor` to process PayTube transactions. - -mod loader; -mod log; -mod processor; -mod settler; -pub mod transaction; - -use { - crate::{ - loader::PayTubeAccountLoader, settler::PayTubeSettler, transaction::PayTubeTransaction, - }, - processor::{ - create_transaction_batch_processor, get_transaction_check_results, PayTubeForkGraph, - }, - solana_client::rpc_client::RpcClient, - solana_fee_structure::FeeStructure, - solana_hash::Hash, - solana_keypair::Keypair, - solana_program_runtime::execution_budget::SVMTransactionExecutionBudget, - solana_rent::Rent, - solana_svm::transaction_processor::{ - TransactionProcessingConfig, TransactionProcessingEnvironment, - }, - solana_svm_feature_set::SVMFeatureSet, - std::sync::{Arc, RwLock}, - transaction::create_svm_transactions, -}; - -/// A PayTube channel instance. 
-/// -/// Facilitates native SOL or SPL token transfers amongst various channel -/// participants, settling the final changes in balances to the base chain. -pub struct PayTubeChannel { - /// I think you know why this is a bad idea... - keys: Vec, - rpc_client: RpcClient, -} - -impl PayTubeChannel { - pub fn new(keys: Vec, rpc_client: RpcClient) -> Self { - Self { keys, rpc_client } - } - - /// The PayTube API. Processes a batch of PayTube transactions. - /// - /// Obviously this is a very simple implementation, but one could imagine - /// a more complex service that employs custom functionality, such as: - /// - /// * Increased throughput for individual P2P transfers. - /// * Custom Solana transaction ordering (e.g. MEV). - /// - /// The general scaffold of the PayTube API would remain the same. - pub fn process_paytube_transfers(&self, transactions: &[PayTubeTransaction]) { - log::setup_solana_logging(); - log::creating_paytube_channel(); - - // PayTube default configs. - // - // These can be configurable for channel customization, including - // imposing resource or feature restrictions, but more commonly they - // would likely be hoisted from the cluster. - // - // For example purposes, they are provided as defaults here. - let compute_budget = SVMTransactionExecutionBudget::default(); - let feature_set = SVMFeatureSet::all_enabled(); - let fee_structure = FeeStructure::default(); - let rent = Rent::default(); - - // PayTube loader/callback implementation. - // - // Required to provide the SVM API with a mechanism for loading - // accounts. - let account_loader = PayTubeAccountLoader::new(&self.rpc_client); - - // Solana SVM transaction batch processor. - // - // Creates an instance of `TransactionBatchProcessor`, which can be - // used by PayTube to process transactions using the SVM. 
- // - // This allows programs such as the System and Token programs to be - // translated and executed within a provisioned virtual machine, as - // well as offers many of the same functionality as the lower-level - // Solana runtime. - let fork_graph = Arc::new(RwLock::new(PayTubeForkGraph {})); - let processor = create_transaction_batch_processor( - &account_loader, - &feature_set, - &compute_budget, - Arc::clone(&fork_graph), - ); - - // The PayTube transaction processing runtime environment. - // - // Again, these can be configurable or hoisted from the cluster. - let processing_environment = TransactionProcessingEnvironment { - blockhash: Hash::default(), - blockhash_lamports_per_signature: fee_structure.lamports_per_signature, - epoch_total_stake: 0, - feature_set, - rent, - }; - - // The PayTube transaction processing config for Solana SVM. - // - // Extended configurations for even more customization of the SVM API. - let processing_config = TransactionProcessingConfig::default(); - - // Step 1: Convert the batch of PayTube transactions into - // SVM-compatible transactions for processing. - // - // In the future, the SVM API may allow for trait-based transactions. - // In this case, `PayTubeTransaction` could simply implement the - // interface, and avoid this conversion entirely. - let svm_transactions = create_svm_transactions(transactions); - - // Step 2: Process the SVM-compatible transactions with the SVM API. - log::processing_transactions(svm_transactions.len()); - let results = processor.load_and_execute_sanitized_transactions( - &account_loader, - &svm_transactions, - get_transaction_check_results(svm_transactions.len()), - &processing_environment, - &processing_config, - ); - - // Step 3: Convert the SVM API processor results into a final ledger - // using `PayTubeSettler`, and settle the resulting balance differences - // to the Solana base chain. 
- // - // Here the settler is basically iterating over the transaction results - // to track debits and credits, but only for those transactions which - // were executed succesfully. - // - // The final ledger of debits and credits to each participant can then - // be packaged into a minimal number of settlement transactions for - // submission. - let settler = PayTubeSettler::new(&self.rpc_client, transactions, results, &self.keys); - log::settling_to_base_chain(settler.num_transactions()); - settler.process_settle(); - - log::channel_closed(); - } -} diff --git a/svm/examples/paytube/src/loader.rs b/svm/examples/paytube/src/loader.rs deleted file mode 100644 index ec18cdb987712b..00000000000000 --- a/svm/examples/paytube/src/loader.rs +++ /dev/null @@ -1,57 +0,0 @@ -//! PayTube's "account loader" component, which provides the SVM API with the -//! ability to load accounts for PayTube channels. -//! -//! The account loader is a simple example of an RPC client that can first load -//! an account from the base chain, then cache it locally within the protocol -//! for the duration of the channel. - -use { - solana_account::{AccountSharedData, ReadableAccount}, - solana_client::rpc_client::RpcClient, - solana_pubkey::Pubkey, - solana_svm_callback::{InvokeContextCallback, TransactionProcessingCallback}, - std::{collections::HashMap, sync::RwLock}, -}; - -/// An account loading mechanism to hoist accounts from the base chain up to -/// an active PayTube channel. -/// -/// Employs a simple cache mechanism to ensure accounts are only loaded once. -pub struct PayTubeAccountLoader<'a> { - cache: RwLock>, - rpc_client: &'a RpcClient, -} - -impl<'a> PayTubeAccountLoader<'a> { - pub fn new(rpc_client: &'a RpcClient) -> Self { - Self { - cache: RwLock::new(HashMap::new()), - rpc_client, - } - } -} - -/// Implementation of the SVM API's `TransactionProcessingCallback` interface. 
-/// -/// The SVM API requires this plugin be provided to provide the SVM with the -/// ability to load accounts. -/// -/// In the Agave validator, this implementation is Bank, powered by AccountsDB. -impl InvokeContextCallback for PayTubeAccountLoader<'_> {} -impl TransactionProcessingCallback for PayTubeAccountLoader<'_> { - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { - if let Some(account) = self.cache.read().unwrap().get(pubkey) { - return Some(account.clone()); - } - - let account: AccountSharedData = self.rpc_client.get_account(pubkey).ok()?.into(); - self.cache.write().unwrap().insert(*pubkey, account.clone()); - - Some(account) - } - - fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { - self.get_account_shared_data(account) - .and_then(|account| owners.iter().position(|key| account.owner().eq(key))) - } -} diff --git a/svm/examples/paytube/src/log.rs b/svm/examples/paytube/src/log.rs deleted file mode 100644 index 040ef237b55284..00000000000000 --- a/svm/examples/paytube/src/log.rs +++ /dev/null @@ -1,47 +0,0 @@ -//! Just logging! 
-use { - std::io::Write, - termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}, -}; - -fn log_magenta(msg: &str) { - let mut stdout = StandardStream::stdout(ColorChoice::Always); - - stdout - .set_color(ColorSpec::new().set_fg(Some(Color::Magenta)).set_bold(true)) - .unwrap(); - - writeln!(&mut stdout, "\n[PAYTUBE]: INFO: {}\n", msg).unwrap(); - - stdout.reset().unwrap(); -} - -pub(crate) fn setup_solana_logging() { - #[rustfmt::skip] - solana_logger::setup_with_default( - "solana_sbpf::vm=debug,\ - solana_runtime::message_processor=debug,\ - solana_runtime::system_instruction_processor=trace", - ); -} - -pub(crate) fn creating_paytube_channel() { - log_magenta("Creating PayTube channel..."); -} - -pub(crate) fn processing_transactions(num_transactions: usize) { - log_magenta("Processing PayTube transactions with the SVM API..."); - log_magenta(&format!("Number of transactions: {}", num_transactions)); -} - -pub(crate) fn settling_to_base_chain(num_transactions: usize) { - log_magenta("Settling results from PayTube to the base chain..."); - log_magenta(&format!( - "Number of settlement transactions: {}", - num_transactions - )); -} - -pub(crate) fn channel_closed() { - log_magenta("PayTube channel closed."); -} diff --git a/svm/examples/paytube/src/processor.rs b/svm/examples/paytube/src/processor.rs deleted file mode 100644 index 0fdb89758290c4..00000000000000 --- a/svm/examples/paytube/src/processor.rs +++ /dev/null @@ -1,114 +0,0 @@ -//! A helper to initialize Solana SVM API's `TransactionBatchProcessor`. 
- -use { - agave_syscalls::create_program_runtime_environment_v1, - solana_clock::Slot, - solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, - solana_fee_structure::FeeDetails, - solana_program_runtime::{ - execution_budget::SVMTransactionExecutionBudget, - loaded_programs::{BlockRelation, ForkGraph, ProgramCacheEntry}, - }, - solana_svm::{ - account_loader::CheckedTransactionDetails, transaction_processor::TransactionBatchProcessor, - }, - solana_svm_callback::TransactionProcessingCallback, - solana_svm_feature_set::SVMFeatureSet, - solana_system_program::system_processor, - std::sync::{Arc, RwLock}, -}; - -mod transaction { - pub use solana_transaction_error::TransactionResult as Result; -} - -/// In order to use the `TransactionBatchProcessor`, another trait - Solana -/// Program Runtime's `ForkGraph` - must be implemented, to tell the batch -/// processor how to work across forks. -/// -/// Since PayTube doesn't use slots or forks, this implementation is mocked. -pub(crate) struct PayTubeForkGraph {} - -impl ForkGraph for PayTubeForkGraph { - fn relationship(&self, _a: Slot, _b: Slot) -> BlockRelation { - BlockRelation::Unknown - } -} - -/// This function encapsulates some initial setup required to tweak the -/// `TransactionBatchProcessor` for use within PayTube. -/// -/// We're simply configuring the mocked fork graph on the SVM API's program -/// cache, then adding the System program to the processor's builtins. -pub(crate) fn create_transaction_batch_processor( - callbacks: &CB, - feature_set: &SVMFeatureSet, - compute_budget: &SVMTransactionExecutionBudget, - fork_graph: Arc>, -) -> TransactionBatchProcessor { - // Create a new transaction batch processor. - // - // We're going to use slot 1 specifically because any programs we add will - // be deployed in slot 0, and they are delayed visibility until the next - // slot (1). - // This includes programs owned by BPF Loader v2, which are automatically - // marked as "depoyed" in slot 0. 
- // See `solana_svm::program_loader::load_program_with_pubkey` for more - // details. - let processor = TransactionBatchProcessor::::new( - /* slot */ 1, - /* epoch */ 1, - Arc::downgrade(&fork_graph), - Some(Arc::new( - create_program_runtime_environment_v1(feature_set, compute_budget, false, false) - .unwrap(), - )), - None, - ); - - // Add the system program builtin. - processor.add_builtin( - callbacks, - solana_system_program::id(), - "system_program", - ProgramCacheEntry::new_builtin( - 0, - b"system_program".len(), - system_processor::Entrypoint::vm, - ), - ); - - // Add the BPF Loader v2 builtin, for the SPL Token program. - processor.add_builtin( - callbacks, - solana_sdk_ids::bpf_loader::id(), - "solana_bpf_loader_program", - ProgramCacheEntry::new_builtin( - 0, - b"solana_bpf_loader_program".len(), - solana_bpf_loader_program::Entrypoint::vm, - ), - ); - - processor -} - -/// This function is also a mock. In the Agave validator, the bank pre-checks -/// transactions before providing them to the SVM API. We mock this step in -/// PayTube, since we don't need to perform such pre-checks. -pub(crate) fn get_transaction_check_results( - len: usize, -) -> Vec> { - let compute_budget_limit = ComputeBudgetLimits::default(); - vec![ - transaction::Result::Ok(CheckedTransactionDetails::new( - None, - Ok(compute_budget_limit.get_compute_budget_and_limits( - compute_budget_limit.loaded_accounts_bytes, - FeeDetails::default(), - /* simd_0296_active */ false, - )), - )); - len - ] -} diff --git a/svm/examples/paytube/src/settler.rs b/svm/examples/paytube/src/settler.rs deleted file mode 100644 index ba69bc3a916b99..00000000000000 --- a/svm/examples/paytube/src/settler.rs +++ /dev/null @@ -1,180 +0,0 @@ -//! PayTube's "settler" component for settling the final ledgers across all -//! channel participants. -//! -//! When users are finished transacting, the resulting ledger is used to craft -//! a batch of transactions to settle all state changes to the base chain -//! 
(Solana). -//! -//! The interesting piece here is that there can be hundreds or thousands of -//! transactions across a handful of users, but only the resulting difference -//! between their balance when the channel opened and their balance when the -//! channel is about to close are needed to create the settlement transaction. - -use { - crate::transaction::PayTubeTransaction, - solana_client::{rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig}, - solana_commitment_config::CommitmentConfig, - solana_instruction::Instruction as SolanaInstruction, - solana_keypair::Keypair, - solana_pubkey::Pubkey, - solana_signer::Signer, - solana_svm::{ - transaction_processing_result::TransactionProcessingResultExtensions, - transaction_processor::LoadAndExecuteSanitizedTransactionsOutput, - }, - solana_system_interface::instruction as system_instruction, - solana_transaction::Transaction as SolanaTransaction, - spl_associated_token_account_interface::address::get_associated_token_address, - std::collections::HashMap, -}; - -/// The key used for storing ledger entries. -/// -/// Each entry in the ledger represents the movement of SOL or tokens between -/// two parties. The two keys of the two parties are stored in a sorted array -/// of length two, and the value's sign determines the direction of transfer. -/// -/// This design allows the ledger to combine transfers from a -> b and b -> a -/// in the same entry, calculating the final delta between two parties. -/// -/// Note that this design could be even _further_ optimized to minimize the -/// number of required settlement transactions in a few ways, including -/// combining transfers across parties, ignoring zero-balance changes, and -/// more. An on-chain program on the base chain could even facilitate -/// multi-party transfers, further reducing the number of required -/// settlement transactions. 
-#[derive(PartialEq, Eq, Hash)] -struct LedgerKey { - mint: Option, - keys: [Pubkey; 2], -} - -/// A ledger of PayTube transactions, used to deconstruct into base chain -/// transactions. -/// -/// The value is stored as a signed `i128`, in order to include a sign but also -/// provide enough room to store `u64::MAX`. -struct Ledger { - ledger: HashMap, -} - -impl Ledger { - fn new( - paytube_transactions: &[PayTubeTransaction], - svm_output: LoadAndExecuteSanitizedTransactionsOutput, - ) -> Self { - let mut ledger: HashMap = HashMap::new(); - paytube_transactions - .iter() - .zip(svm_output.processing_results) - .for_each(|(transaction, result)| { - // Only append to the ledger if the PayTube transaction was - // successful. - if result.was_processed_with_successful_result() { - let mint = transaction.mint; - let mut keys = [transaction.from, transaction.to]; - keys.sort(); - let amount = if keys.iter().position(|k| k.eq(&transaction.from)).unwrap() == 0 - { - transaction.amount as i128 - } else { - (transaction.amount as i128) - .checked_neg() - .unwrap_or_default() - }; - ledger - .entry(LedgerKey { mint, keys }) - .and_modify(|e| *e = e.checked_add(amount).unwrap()) - .or_insert(amount); - } - }); - Self { ledger } - } - - fn generate_base_chain_instructions(&self) -> Vec { - self.ledger - .iter() - .map(|(key, amount)| { - let (from, to, amount) = if *amount < 0 { - (key.keys[1], key.keys[0], (amount * -1) as u64) - } else { - (key.keys[0], key.keys[1], *amount as u64) - }; - if let Some(mint) = key.mint { - let source_pubkey = get_associated_token_address(&from, &mint); - let destination_pubkey = get_associated_token_address(&to, &mint); - return spl_token_interface::instruction::transfer( - &spl_token_interface::id(), - &source_pubkey, - &destination_pubkey, - &from, - &[], - amount, - ) - .unwrap(); - } - system_instruction::transfer(&from, &to, amount) - }) - .collect::>() - } -} - -const CHUNK_SIZE: usize = 10; - -/// PayTube final transaction settler. 
-pub struct PayTubeSettler<'a> { - instructions: Vec, - keys: &'a [Keypair], - rpc_client: &'a RpcClient, -} - -impl<'a> PayTubeSettler<'a> { - /// Create a new instance of a `PayTubeSettler` by tallying up all - /// transfers into a ledger. - pub fn new( - rpc_client: &'a RpcClient, - paytube_transactions: &[PayTubeTransaction], - svm_output: LoadAndExecuteSanitizedTransactionsOutput, - keys: &'a [Keypair], - ) -> Self { - // Build the ledger from the processed PayTube transactions. - let ledger = Ledger::new(paytube_transactions, svm_output); - - // Build the Solana instructions from the ledger. - let instructions = ledger.generate_base_chain_instructions(); - - Self { - instructions, - keys, - rpc_client, - } - } - - /// Count how many settlement transactions are estimated to be required. - pub(crate) fn num_transactions(&self) -> usize { - self.instructions.len().div_ceil(CHUNK_SIZE) - } - - /// Settle the payment channel results to the Solana blockchain. - pub fn process_settle(&self) { - let recent_blockhash = self.rpc_client.get_latest_blockhash().unwrap(); - self.instructions.chunks(CHUNK_SIZE).for_each(|chunk| { - let transaction = SolanaTransaction::new_signed_with_payer( - chunk, - Some(&self.keys[0].pubkey()), - self.keys, - recent_blockhash, - ); - self.rpc_client - .send_and_confirm_transaction_with_spinner_and_config( - &transaction, - CommitmentConfig::processed(), - RpcSendTransactionConfig { - skip_preflight: true, - ..Default::default() - }, - ) - .unwrap(); - }); - } -} diff --git a/svm/examples/paytube/src/transaction.rs b/svm/examples/paytube/src/transaction.rs deleted file mode 100644 index 7dbc449b8a97a0..00000000000000 --- a/svm/examples/paytube/src/transaction.rs +++ /dev/null @@ -1,82 +0,0 @@ -//! PayTube's custom transaction format, tailored specifically for SOL or SPL -//! token transfers. -//! -//! Mostly for demonstration purposes, to show how projects may use completely -//! 
different transactions in their protocol, then convert the resulting state -//! transitions into the necessary transactions for the base chain - in this -//! case Solana. - -use { - solana_instruction::Instruction as SolanaInstruction, - solana_pubkey::Pubkey, - solana_system_interface::instruction as system_instruction, - solana_transaction::{ - sanitized::SanitizedTransaction as SolanaSanitizedTransaction, - Transaction as SolanaTransaction, - }, - spl_associated_token_account_interface::address::get_associated_token_address, - std::collections::HashSet, -}; - -/// A simple PayTube transaction. Transfers SPL tokens or SOL from one account -/// to another. -/// -/// A `None` value for `mint` represents native SOL. -pub struct PayTubeTransaction { - pub mint: Option, - pub from: Pubkey, - pub to: Pubkey, - pub amount: u64, -} - -impl From<&PayTubeTransaction> for SolanaInstruction { - fn from(value: &PayTubeTransaction) -> Self { - let PayTubeTransaction { - mint, - from, - to, - amount, - } = value; - if let Some(mint) = mint { - let source_pubkey = get_associated_token_address(from, mint); - let destination_pubkey = get_associated_token_address(to, mint); - return spl_token_interface::instruction::transfer( - &spl_token_interface::id(), - &source_pubkey, - &destination_pubkey, - from, - &[], - *amount, - ) - .unwrap(); - } - system_instruction::transfer(from, to, *amount) - } -} - -impl From<&PayTubeTransaction> for SolanaTransaction { - fn from(value: &PayTubeTransaction) -> Self { - SolanaTransaction::new_with_payer(&[SolanaInstruction::from(value)], Some(&value.from)) - } -} - -impl From<&PayTubeTransaction> for SolanaSanitizedTransaction { - fn from(value: &PayTubeTransaction) -> Self { - SolanaSanitizedTransaction::try_from_legacy_transaction( - SolanaTransaction::from(value), - &HashSet::new(), - ) - .unwrap() - } -} - -/// Create a batch of Solana transactions, for the Solana SVM's transaction -/// processor, from a batch of PayTube instructions. 
-pub fn create_svm_transactions( - paytube_transactions: &[PayTubeTransaction], -) -> Vec { - paytube_transactions - .iter() - .map(SolanaSanitizedTransaction::from) - .collect() -} diff --git a/svm/examples/paytube/tests/native_sol.rs b/svm/examples/paytube/tests/native_sol.rs deleted file mode 100644 index 7396e945def7c3..00000000000000 --- a/svm/examples/paytube/tests/native_sol.rs +++ /dev/null @@ -1,73 +0,0 @@ -mod setup; - -use { - setup::{system_account, TestValidatorContext}, - solana_keypair::Keypair, - solana_signer::Signer, - solana_svm_example_paytube::{transaction::PayTubeTransaction, PayTubeChannel}, -}; - -#[test] -fn test_native_sol() { - let alice = Keypair::new(); - let bob = Keypair::new(); - let will = Keypair::new(); - - let alice_pubkey = alice.pubkey(); - let bob_pubkey = bob.pubkey(); - let will_pubkey = will.pubkey(); - - let accounts = vec![ - (alice_pubkey, system_account(10_000_000)), - (bob_pubkey, system_account(10_000_000)), - (will_pubkey, system_account(10_000_000)), - ]; - - let context = TestValidatorContext::start_with_accounts(accounts); - let test_validator = &context.test_validator; - let payer = context.payer.insecure_clone(); - - let rpc_client = test_validator.get_rpc_client(); - - let paytube_channel = PayTubeChannel::new(vec![payer, alice, bob, will], rpc_client); - - paytube_channel.process_paytube_transfers(&[ - // Alice -> Bob 2_000_000 - PayTubeTransaction { - from: alice_pubkey, - to: bob_pubkey, - amount: 2_000_000, - mint: None, - }, - // Bob -> Will 5_000_000 - PayTubeTransaction { - from: bob_pubkey, - to: will_pubkey, - amount: 5_000_000, - mint: None, - }, - // Alice -> Bob 2_000_000 - PayTubeTransaction { - from: alice_pubkey, - to: bob_pubkey, - amount: 2_000_000, - mint: None, - }, - // Will -> Alice 1_000_000 - PayTubeTransaction { - from: will_pubkey, - to: alice_pubkey, - amount: 1_000_000, - mint: None, - }, - ]); - - // Ledger: - // Alice: 10_000_000 - 2_000_000 - 2_000_000 + 1_000_000 = 7_000_000 - // 
Bob: 10_000_000 + 2_000_000 - 5_000_000 + 2_000_000 = 9_000_000 - // Will: 10_000_000 + 5_000_000 - 1_000_000 = 14_000_000 - let rpc_client = test_validator.get_rpc_client(); - assert_eq!(rpc_client.get_balance(&alice_pubkey).unwrap(), 7_000_000); - assert_eq!(rpc_client.get_balance(&bob_pubkey).unwrap(), 9_000_000); - assert_eq!(rpc_client.get_balance(&will_pubkey).unwrap(), 14_000_000); -} diff --git a/svm/examples/paytube/tests/setup.rs b/svm/examples/paytube/tests/setup.rs deleted file mode 100644 index f4ff327b8d5052..00000000000000 --- a/svm/examples/paytube/tests/setup.rs +++ /dev/null @@ -1,85 +0,0 @@ -#![allow(unused)] - -use { - solana_account::{Account, AccountSharedData, ReadableAccount}, - solana_epoch_schedule::EpochSchedule, - solana_keypair::Keypair, - solana_program_pack::Pack, - solana_pubkey::Pubkey, - solana_system_interface::program as system_program, - solana_test_validator::{TestValidator, TestValidatorGenesis}, - spl_token_interface::state::{Account as TokenAccount, Mint}, -}; - -const SLOTS_PER_EPOCH: u64 = 50; - -pub struct TestValidatorContext { - pub test_validator: TestValidator, - pub payer: Keypair, -} - -impl TestValidatorContext { - pub fn start_with_accounts(accounts: Vec<(Pubkey, AccountSharedData)>) -> Self { - let epoch_schedule = EpochSchedule::custom(SLOTS_PER_EPOCH, SLOTS_PER_EPOCH, false); - - let (test_validator, payer) = TestValidatorGenesis::default() - .epoch_schedule(epoch_schedule) - .add_accounts(accounts) - .start(); - - Self { - test_validator, - payer, - } - } -} - -pub fn get_token_account_balance(token_account: Account) -> u64 { - let state = TokenAccount::unpack(token_account.data()).unwrap(); - state.amount -} - -pub fn mint_account() -> AccountSharedData { - let data = { - let mut data = [0; Mint::LEN]; - Mint::pack( - Mint { - supply: 100_000_000, - decimals: 0, - is_initialized: true, - ..Default::default() - }, - &mut data, - ) - .unwrap(); - data - }; - let mut account = AccountSharedData::new(100_000_000, 
data.len(), &spl_token_interface::id()); - account.set_data_from_slice(&data); - account -} - -pub fn system_account(lamports: u64) -> AccountSharedData { - AccountSharedData::new(lamports, 0, &system_program::id()) -} - -pub fn token_account(owner: &Pubkey, mint: &Pubkey, amount: u64) -> AccountSharedData { - let data = { - let mut data = [0; TokenAccount::LEN]; - TokenAccount::pack( - TokenAccount { - mint: *mint, - owner: *owner, - amount, - state: spl_token_interface::state::AccountState::Initialized, - ..Default::default() - }, - &mut data, - ) - .unwrap(); - data - }; - let mut account = AccountSharedData::new(100_000_000, data.len(), &spl_token_interface::id()); - account.set_data_from_slice(&data); - account -} diff --git a/svm/examples/paytube/tests/spl_tokens.rs b/svm/examples/paytube/tests/spl_tokens.rs deleted file mode 100644 index bb0d79cce05e23..00000000000000 --- a/svm/examples/paytube/tests/spl_tokens.rs +++ /dev/null @@ -1,107 +0,0 @@ -mod setup; - -use { - setup::{ - get_token_account_balance, mint_account, system_account, token_account, - TestValidatorContext, - }, - solana_keypair::Keypair, - solana_pubkey::Pubkey, - solana_signer::Signer, - solana_svm_example_paytube::{transaction::PayTubeTransaction, PayTubeChannel}, - spl_associated_token_account_interface::address::get_associated_token_address, -}; - -#[test] -fn test_spl_tokens() { - let mint = Pubkey::new_unique(); - - let alice = Keypair::new(); - let bob = Keypair::new(); - let will = Keypair::new(); - - let alice_pubkey = alice.pubkey(); - let alice_token_account_pubkey = get_associated_token_address(&alice_pubkey, &mint); - - let bob_pubkey = bob.pubkey(); - let bob_token_account_pubkey = get_associated_token_address(&bob_pubkey, &mint); - - let will_pubkey = will.pubkey(); - let will_token_account_pubkey = get_associated_token_address(&will_pubkey, &mint); - - let accounts = vec![ - (mint, mint_account()), - (alice_pubkey, system_account(10_000_000)), - ( - 
alice_token_account_pubkey, - token_account(&alice_pubkey, &mint, 10), - ), - (bob_pubkey, system_account(10_000_000)), - ( - bob_token_account_pubkey, - token_account(&bob_pubkey, &mint, 10), - ), - (will_pubkey, system_account(10_000_000)), - ( - will_token_account_pubkey, - token_account(&will_pubkey, &mint, 10), - ), - ]; - - let context = TestValidatorContext::start_with_accounts(accounts); - let test_validator = &context.test_validator; - let payer = context.payer.insecure_clone(); - - let rpc_client = test_validator.get_rpc_client(); - - let paytube_channel = PayTubeChannel::new(vec![payer, alice, bob, will], rpc_client); - - paytube_channel.process_paytube_transfers(&[ - // Alice -> Bob 2 - PayTubeTransaction { - from: alice_pubkey, - to: bob_pubkey, - amount: 2, - mint: Some(mint), - }, - // Bob -> Will 5 - PayTubeTransaction { - from: bob_pubkey, - to: will_pubkey, - amount: 5, - mint: Some(mint), - }, - // Alice -> Bob 2 - PayTubeTransaction { - from: alice_pubkey, - to: bob_pubkey, - amount: 2, - mint: Some(mint), - }, - // Will -> Alice 1 - PayTubeTransaction { - from: will_pubkey, - to: alice_pubkey, - amount: 1, - mint: Some(mint), - }, - ]); - - // Ledger: - // Alice: 10 - 2 - 2 + 1 = 7 - // Bob: 10 + 2 - 5 + 2 = 9 - // Will: 10 + 5 - 1 = 14 - let rpc_client = test_validator.get_rpc_client(); - assert_eq!( - get_token_account_balance(rpc_client.get_account(&alice_token_account_pubkey).unwrap()), - 7 - ); - assert_eq!( - get_token_account_balance(rpc_client.get_account(&bob_token_account_pubkey).unwrap()), - 9 - ); - assert_eq!( - get_token_account_balance(rpc_client.get_account(&will_token_account_pubkey).unwrap()), - 14 - ); -} diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 69344124c04d37..7f4b6bb7ebd3bc 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -4,7 +4,9 @@ use { crate::{ account_overrides::AccountOverrides, nonce_info::NonceInfo, - rent_calculator::{check_rent_state_with_account, 
get_account_rent_state}, + rent_calculator::{ + check_rent_state_with_account, get_account_rent_state, RENT_EXEMPT_RENT_EPOCH, + }, rollback_accounts::RollbackAccounts, transaction_error_metrics::TransactionErrorMetrics, transaction_execution_result::ExecutedTransaction, @@ -14,6 +16,7 @@ use { state_traits::StateMut, Account, AccountSharedData, ReadableAccount, WritableAccount, PROGRAM_OWNERS, }, + solana_clock::Slot, solana_fee_structure::FeeDetails, solana_instruction::{BorrowedAccountMeta, BorrowedInstruction}, solana_instructions_sysvar::construct_instructions_data, @@ -25,7 +28,6 @@ use { }, solana_pubkey::Pubkey, solana_rent::Rent, - solana_rent_collector::RENT_EXEMPT_RENT_EPOCH, solana_sdk_ids::{ bpf_loader_upgradeable, native_loader, sysvar::{self, slot_history}, @@ -33,7 +35,7 @@ use { solana_svm_callback::{AccountState, TransactionProcessingCallback}, solana_svm_feature_set::SVMFeatureSet, solana_svm_transaction::svm_message::SVMMessage, - solana_transaction_context::{IndexOfAccount, TransactionAccount}, + solana_transaction_context::{transaction_accounts::TransactionAccount, IndexOfAccount}, solana_transaction_error::{TransactionError, TransactionResult as Result}, std::num::{NonZeroU32, Saturating}, }; @@ -48,7 +50,6 @@ pub(crate) const TRANSACTION_ACCOUNT_BASE_SIZE: usize = 64; const ADDRESS_LOOKUP_TABLE_BASE_SIZE: usize = 8248; // for the load instructions -pub(crate) type TransactionProgramIndices = Vec>; pub type TransactionCheckResult = Result; type TransactionValidationResult = Result; @@ -137,7 +138,7 @@ pub(crate) struct LoadedTransactionAccount { )] pub struct LoadedTransaction { pub accounts: Vec, - pub(crate) program_indices: TransactionProgramIndices, + pub(crate) program_indices: Vec, pub fee_details: FeeDetails, pub rollback_accounts: RollbackAccounts, pub(crate) compute_budget: SVMTransactionExecutionBudget, @@ -157,6 +158,7 @@ pub struct FeesOnlyTransaction { // type, and itself implements `TransactionProcessingCallback`, behaving // 
exactly like the implementor of the trait, but also returning up-to-date // account states mid-batch. +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) struct AccountLoader<'a, CB: TransactionProcessingCallback> { loaded_accounts: AHashMap, callbacks: &'a CB, @@ -165,6 +167,7 @@ pub(crate) struct AccountLoader<'a, CB: TransactionProcessingCallback> { impl<'a, CB: TransactionProcessingCallback> AccountLoader<'a, CB> { // create a new AccountLoader for the transaction batch + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn new_with_loaded_accounts_capacity( account_overrides: Option<&'a AccountOverrides>, callbacks: &'a CB, @@ -233,17 +236,26 @@ impl<'a, CB: TransactionProcessingCallback> AccountLoader<'a, CB> { // This is a general purpose function suitable for usage outside initial transaction loading. pub(crate) fn load_account(&mut self, account_key: &Pubkey) -> Option { match self.do_load(account_key) { + // Exists, from AccountLoader. + (Some(account), false) => Some(account), + // Not allocated, but has an AccountLoader placeholder already. + (None, false) => None, + // Exists in accounts-db. Store it in AccountLoader for future loads. (Some(account), true) => { self.loaded_accounts.insert(*account_key, account.clone()); Some(account) } - (account, false) => account, - (None, true) => unreachable!(), + // Does not exist and has never been seen. + (None, true) => { + self.loaded_accounts + .insert(*account_key, AccountSharedData::default()); + None + } } } // Internal helper for core loading logic to prevent code duplication. Returns a bool - // indicating whether the account came from accounts-db, which allows wrappers with + // indicating whether an accounts-db lookup was performed, which allows wrappers with // &mut self to insert the account. Wrappers with &self ignore it. 
fn do_load(&self, account_key: &Pubkey) -> (Option, bool) { if let Some(account) = self.loaded_accounts.get(account_key) { @@ -257,10 +269,10 @@ impl<'a, CB: TransactionProcessingCallback> AccountLoader<'a, CB> { }; (option_account, false) - } else if let Some(account) = self.callbacks.get_account_shared_data(account_key) { + } else if let Some((account, _slot)) = self.callbacks.get_account_shared_data(account_key) { (Some(account), true) } else { - (None, false) + (None, true) } } @@ -317,14 +329,10 @@ impl<'a, CB: TransactionProcessingCallback> AccountLoader<'a, CB> { // In general, most accounts we load this way should already be in our accounts store. // Once SIMD-0186 is implemented, 100% of accounts will be. impl TransactionProcessingCallback for AccountLoader<'_, CB> { - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { - self.do_load(pubkey).0 - } - - fn account_matches_owners(&self, pubkey: &Pubkey, owners: &[Pubkey]) -> Option { - self.do_load(pubkey) - .0 - .and_then(|account| owners.iter().position(|entry| entry == account.owner())) + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> { + // The returned last-modification-slot is a dummy value for now, + // but will later be used in IndexImplementation::V2 of the global program cache. 
+ self.do_load(pubkey).0.map(|account| (account, 0)) } } @@ -403,7 +411,6 @@ pub fn validate_fee_payer( &payer_pre_rent_state, &payer_post_rent_state, payer_address, - payer_account, payer_index, ) } @@ -449,7 +456,7 @@ pub(crate) fn load_transaction( #[derive(PartialEq, Eq, Debug, Clone)] struct LoadedTransactionAccounts { pub(crate) accounts: Vec, - pub(crate) program_indices: TransactionProgramIndices, + pub(crate) program_indices: Vec, pub(crate) loaded_accounts_data_size: u32, } @@ -612,15 +619,6 @@ fn load_transaction_accounts_simd186( return Err(TransactionError::ProgramAccountNotFound); }; - if !account_loader - .feature_set - .remove_accounts_executable_flag_checks - && !program_account.executable() - { - error_metrics.invalid_program_for_execution += 1; - return Err(TransactionError::InvalidProgramForExecution); - } - let owner_id = program_account.owner(); if !native_loader::check_id(owner_id) && !PROGRAM_OWNERS.contains(owner_id) { error_metrics.invalid_program_for_execution += 1; @@ -629,7 +627,7 @@ fn load_transaction_accounts_simd186( loaded_transaction_accounts .program_indices - .push(vec![instruction.program_id_index as IndexOfAccount]); + .push(instruction.program_id_index as IndexOfAccount); } Ok(loaded_transaction_accounts) @@ -679,9 +677,10 @@ fn load_transaction_accounts_old( let program_indices = message .program_instructions_iter() .map(|(program_id, instruction)| { - let mut account_indices = Vec::with_capacity(2); if native_loader::check_id(program_id) { - return Ok(account_indices); + // Just as with an empty vector, trying to borrow the program account will fail + // with a u16::MAX + return Ok(u16::MAX as IndexOfAccount); } let program_index = instruction.program_id_index as usize; @@ -691,29 +690,14 @@ fn load_transaction_accounts_old( return Err(TransactionError::ProgramAccountNotFound); }; - if !account_loader - .feature_set - .remove_accounts_executable_flag_checks - && !program_account.executable() - { - 
error_metrics.invalid_program_for_execution += 1; - return Err(TransactionError::InvalidProgramForExecution); - } - account_indices.insert(0, program_index as IndexOfAccount); - let owner_id = program_account.owner(); if native_loader::check_id(owner_id) { - return Ok(account_indices); + return Ok(program_index as IndexOfAccount); } if !validated_loaders.contains(owner_id) { if let Some(owner_account) = account_loader.load_account(owner_id) { - if !native_loader::check_id(owner_account.owner()) - || (!account_loader - .feature_set - .remove_accounts_executable_flag_checks - && !owner_account.executable()) - { + if !native_loader::check_id(owner_account.owner()) { error_metrics.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); } @@ -729,9 +713,9 @@ fn load_transaction_accounts_old( return Err(TransactionError::ProgramAccountNotFound); } } - Ok(account_indices) + Ok(program_index as IndexOfAccount) }) - .collect::>>>()?; + .collect::>>()?; Ok(LoadedTransactionAccounts { accounts, @@ -833,7 +817,6 @@ mod tests { use { super::*, crate::transaction_account_state_info::TransactionAccountStateInfo, - agave_reserved_account_keys::ReservedAccountKeys, rand0_7::prelude::*, solana_account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, solana_hash::Hash, @@ -845,14 +828,13 @@ mod tests { v0::{LoadedAddresses, LoadedMessage}, LegacyMessage, Message, MessageHeader, SanitizedMessage, }, - solana_native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, + solana_native_token::LAMPORTS_PER_SOL, solana_nonce::{self as nonce, versions::Versions as NonceVersions}, solana_program_runtime::execution_budget::{ DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, }, solana_pubkey::Pubkey, solana_rent::Rent, - solana_rent_collector::RENT_EXEMPT_RENT_EPOCH, solana_sdk_ids::{ bpf_loader, bpf_loader_upgradeable, native_loader, system_program, sysvar, }, @@ -861,9 +843,18 @@ mod tests { 
solana_svm_callback::{InvokeContextCallback, TransactionProcessingCallback}, solana_system_transaction::transfer, solana_transaction::{sanitized::SanitizedTransaction, Transaction}, - solana_transaction_context::{TransactionAccount, TransactionContext}, + solana_transaction_context::{ + transaction_accounts::TransactionAccount, TransactionContext, + }, solana_transaction_error::{TransactionError, TransactionResult as Result}, - std::{borrow::Cow, cell::RefCell, collections::HashMap, fs::File, io::Read}, + std::{ + borrow::Cow, + cell::RefCell, + collections::{HashMap, HashSet}, + fs::File, + io::Read, + sync::Arc, + }, test_case::test_case, }; @@ -889,12 +880,10 @@ mod tests { impl InvokeContextCallback for TestCallbacks {} impl TransactionProcessingCallback for TestCallbacks { - fn account_matches_owners(&self, _account: &Pubkey, _owners: &[Pubkey]) -> Option { - None - } - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { - self.accounts_map.get(pubkey).cloned() + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> { + self.accounts_map + .get(pubkey) + .map(|account| (account.clone(), 0)) } fn inspect_account( @@ -961,10 +950,7 @@ mod tests { } fn new_unchecked_sanitized_message(message: Message) -> SanitizedMessage { - SanitizedMessage::Legacy(LegacyMessage::new( - message, - &ReservedAccountKeys::empty_key_set(), - )) + SanitizedMessage::Legacy(LegacyMessage::new(message, &HashSet::new())) } #[test_case(false; "informal_loaded_size")] @@ -1059,7 +1045,7 @@ mod tests { assert_eq!(loaded_transaction.accounts.len(), 3); assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1); assert_eq!(loaded_transaction.program_indices.len(), 1); - assert_eq!(loaded_transaction.program_indices[0].len(), 0); + assert_eq!(loaded_transaction.program_indices[0], u16::MAX); } TransactionLoadResult::FeesOnly(fees_only_tx) if formalize_loaded_transaction_data_size => @@ -1158,7 +1144,6 @@ mod tests { ); let mut feature_set = 
SVMFeatureSet::all_enabled(); - feature_set.remove_accounts_executable_flag_checks = false; feature_set.formalize_loaded_transaction_data_size = formalize_loaded_transaction_data_size; let load_results = load_accounts_with_features_and_rent( @@ -1169,14 +1154,18 @@ mod tests { feature_set, ); - assert_eq!(error_metrics.invalid_program_for_execution.0, 1); - assert!(matches!( - load_results, - TransactionLoadResult::FeesOnly(FeesOnlyTransaction { - load_error: TransactionError::InvalidProgramForExecution, - .. - }), - )); + assert_eq!(error_metrics.invalid_program_for_execution.0, 0); + match &load_results { + TransactionLoadResult::Loaded(loaded_transaction) => { + assert_eq!(loaded_transaction.accounts.len(), 2); + assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1); + assert_eq!(loaded_transaction.accounts[1].1, accounts[1].1); + assert_eq!(loaded_transaction.program_indices.len(), 1); + assert_eq!(loaded_transaction.program_indices[0], 1); + } + TransactionLoadResult::FeesOnly(fees_only_tx) => panic!("{}", fees_only_tx.load_error), + TransactionLoadResult::NotLoaded(e) => panic!("{e}"), + } } #[test_case(false; "informal_loaded_size")] @@ -1235,8 +1224,8 @@ mod tests { assert_eq!(loaded_transaction.accounts.len(), 3); assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1); assert_eq!(loaded_transaction.program_indices.len(), 2); - assert_eq!(loaded_transaction.program_indices[0], &[1]); - assert_eq!(loaded_transaction.program_indices[1], &[2]); + assert_eq!(loaded_transaction.program_indices[0], 1); + assert_eq!(loaded_transaction.program_indices[1], 2); } TransactionLoadResult::FeesOnly(fees_only_tx) => panic!("{}", fees_only_tx.load_error), TransactionLoadResult::NotLoaded(e) => panic!("{e}"), @@ -1641,7 +1630,7 @@ mod tests { mock_bank.accounts_map[&native_loader::id()].clone() ) ], - program_indices: vec![vec![]], + program_indices: vec![u16::MAX], loaded_accounts_data_size, } ); @@ -1809,7 +1798,7 @@ mod tests { 
mock_bank.accounts_map[&key1.pubkey()].clone() ), ], - program_indices: vec![vec![1]], + program_indices: vec![1], loaded_accounts_data_size, } ); @@ -1998,7 +1987,7 @@ mod tests { mock_bank.accounts_map[&key1.pubkey()].clone() ), ], - program_indices: vec![vec![1]], + program_indices: vec![1], loaded_accounts_data_size, } ); @@ -2097,7 +2086,7 @@ mod tests { ), (key3.pubkey(), account_data), ], - program_indices: vec![vec![1], vec![1]], + program_indices: vec![1, 1], loaded_accounts_data_size, } ); @@ -2124,12 +2113,7 @@ mod tests { .insert(recipient, AccountSharedData::default()); let mut account_loader = (&bank).into(); - let tx = transfer( - &mint_keypair, - &recipient, - sol_to_lamports(1.), - last_block_hash, - ); + let tx = transfer(&mint_keypair, &recipient, LAMPORTS_PER_SOL, last_block_hash); let num_accounts = tx.message().account_keys.len(); let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); let mut error_metrics = TransactionErrorMetrics::default(); @@ -2266,7 +2250,7 @@ mod tests { ), (key3.pubkey(), account_data), ], - program_indices: vec![vec![1], vec![1]], + program_indices: vec![1, 1], fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::default(), compute_budget: SVMTransactionExecutionBudget::default(), @@ -2757,7 +2741,10 @@ mod tests { let account_loader: AccountLoader<_> = (&mock_bank).into(); assert_eq!( - account_loader.get_account_shared_data(&fee_payer).unwrap(), + account_loader + .get_account_shared_data(&fee_payer) + .unwrap() + .0, fee_payer_account ); @@ -2784,7 +2771,10 @@ mod tests { fee_payer_account ); assert_eq!( - account_loader.get_account_shared_data(&fee_payer).unwrap(), + account_loader + .get_account_shared_data(&fee_payer) + .unwrap() + .0, fee_payer_account ); @@ -2808,15 +2798,10 @@ mod tests { // note all magic numbers (how many accounts, how many instructions, how big to size buffers) are arbitrary // other than trying not to swamp programs with blank accounts and keep 
transaction size below the 64mb limit - #[test_case(false; "executable_mandatory")] - #[test_case(true; "executable_optional")] - fn test_load_transaction_accounts_data_sizes_simd186( - remove_accounts_executable_flag_checks: bool, - ) { + #[test] + fn test_load_transaction_accounts_data_sizes_simd186() { let mut rng = rand0_7::thread_rng(); let mut mock_bank = TestCallbacks::default(); - mock_bank.feature_set.remove_accounts_executable_flag_checks = - remove_accounts_executable_flag_checks; // arbitrary accounts for _ in 0..128 { @@ -2855,7 +2840,7 @@ mod tests { 1, vec![0; rng.gen_range(0, 512)], *loader, - !remove_accounts_executable_flag_checks || rng.gen(), + rng.gen(), u64::MAX, ); @@ -2873,7 +2858,7 @@ mod tests { 1, vec![0; rng.gen_range(0, 512)], *loader, - !remove_accounts_executable_flag_checks || rng.gen(), + rng.gen(), u64::MAX, ); programdata_tracker.insert( @@ -2995,4 +2980,62 @@ mod tests { ); } } + + #[test] + fn test_loader_aliasing() { + let mut mock_bank = TestCallbacks::default(); + + let hit_address = Pubkey::new_unique(); + let miss_address = Pubkey::new_unique(); + + let expected_hit_account = AccountSharedData::default(); + mock_bank + .accounts_map + .insert(hit_address, expected_hit_account.clone()); + + let mut account_loader: AccountLoader<_> = (&mock_bank).into(); + + // load hits accounts-db, same account is stored + account_loader.load_account(&hit_address); + let actual_hit_account = account_loader.loaded_accounts.get(&hit_address); + + assert_eq!(actual_hit_account, Some(&expected_hit_account)); + assert!(Arc::ptr_eq( + &actual_hit_account.unwrap().data_clone(), + &expected_hit_account.data_clone() + )); + + // reload doesnt affect this + account_loader.load_account(&hit_address); + let actual_hit_account = account_loader.loaded_accounts.get(&hit_address); + + assert_eq!(actual_hit_account, Some(&expected_hit_account)); + assert!(Arc::ptr_eq( + &actual_hit_account.unwrap().data_clone(), + &expected_hit_account.data_clone() + )); + 
+ // load misses accounts-db, placeholder is inserted + account_loader.load_account(&miss_address); + let expected_miss_account = account_loader + .loaded_accounts + .get(&miss_address) + .unwrap() + .clone(); + + assert!(!Arc::ptr_eq( + &expected_miss_account.data_clone(), + &expected_hit_account.data_clone() + )); + + // reload keeps the same placeholder + account_loader.load_account(&miss_address); + let actual_miss_account = account_loader.loaded_accounts.get(&miss_address); + + assert_eq!(actual_miss_account, Some(&expected_miss_account)); + assert!(Arc::ptr_eq( + &actual_miss_account.unwrap().data_clone(), + &expected_miss_account.data_clone() + )); + } } diff --git a/svm/src/message_processor.rs b/svm/src/message_processor.rs index 1bf6511ba17a12..ccdd7e046987e6 100644 --- a/svm/src/message_processor.rs +++ b/svm/src/message_processor.rs @@ -1,8 +1,8 @@ use { - solana_measure::measure_us, solana_program_runtime::invoke_context::InvokeContext, + solana_svm_measure::measure_us, + solana_svm_timings::{ExecuteDetailsTimings, ExecuteTimings}, solana_svm_transaction::svm_message::SVMMessage, - solana_timings::{ExecuteDetailsTimings, ExecuteTimings}, solana_transaction_context::IndexOfAccount, solana_transaction_error::TransactionError, }; @@ -14,19 +14,19 @@ use { /// The accounts are committed back to the bank only if every instruction succeeds. 
pub(crate) fn process_message( message: &impl SVMMessage, - program_indices: &[Vec], + program_indices: &[IndexOfAccount], invoke_context: &mut InvokeContext, execute_timings: &mut ExecuteTimings, accumulated_consumed_units: &mut u64, ) -> Result<(), TransactionError> { debug_assert_eq!(program_indices.len(), message.num_instructions()); - for (top_level_instruction_index, ((program_id, instruction), program_indices)) in message + for (top_level_instruction_index, ((program_id, instruction), program_account_index)) in message .program_instructions_iter() .zip(program_indices.iter()) .enumerate() { invoke_context - .prepare_next_top_level_instruction(message, &instruction, program_indices.clone()) + .prepare_next_top_level_instruction(message, &instruction, *program_account_index) .map_err(|err| { TransactionError::InstructionError(top_level_instruction_index as u8, err) })?; @@ -76,7 +76,6 @@ pub(crate) fn process_message( mod tests { use { super::*, - agave_reserved_account_keys::ReservedAccountKeys, ed25519_dalek::ed25519::signature::Signer, openssl::{ ec::{EcGroup, EcKey}, @@ -109,7 +108,7 @@ mod tests { solana_svm_callback::InvokeContextCallback, solana_svm_feature_set::SVMFeatureSet, solana_transaction_context::TransactionContext, - std::sync::Arc, + std::{collections::HashSet, sync::Arc}, }; struct MockCallback {} @@ -127,8 +126,7 @@ mod tests { } fn new_sanitized_message(message: Message) -> SanitizedMessage { - SanitizedMessage::try_from_legacy_message(message, &ReservedAccountKeys::empty_key_set()) - .unwrap() + SanitizedMessage::try_from_legacy_message(message, &HashSet::new()).unwrap() } #[test] @@ -149,17 +147,17 @@ mod tests { MockSystemInstruction::Correct => Ok(()), MockSystemInstruction::TransferLamports { lamports } => { instruction_context - .try_borrow_instruction_account(transaction_context, 0)? + .try_borrow_instruction_account(0)? .checked_sub_lamports(lamports)?; instruction_context - .try_borrow_instruction_account(transaction_context, 1)? 
+ .try_borrow_instruction_account(1)? .checked_add_lamports(lamports)?; Ok(()) } MockSystemInstruction::ChangeData { data } => { instruction_context - .try_borrow_instruction_account(transaction_context, 1)? - .set_data(vec![data])?; + .try_borrow_instruction_account(1)? + .set_data_from_slice(&[data])?; Ok(()) } } @@ -187,7 +185,7 @@ mod tests { ), ]; let mut transaction_context = TransactionContext::new(accounts, Rent::default(), 1, 3); - let program_indices = vec![vec![2]]; + let program_indices = vec![2]; let mut program_cache_for_tx_batch = ProgramCacheForTxBatch::default(); program_cache_for_tx_batch.replenish( mock_system_program_id, @@ -363,15 +361,12 @@ mod tests { let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let instruction_data = instruction_context.get_instruction_data(); - let mut to_account = - instruction_context.try_borrow_instruction_account(transaction_context, 1)?; + let mut to_account = instruction_context.try_borrow_instruction_account(1)?; if let Ok(instruction) = bincode::deserialize(instruction_data) { match instruction { MockSystemInstruction::BorrowFail => { - let from_account = instruction_context - .try_borrow_instruction_account(transaction_context, 0)?; - let dup_account = instruction_context - .try_borrow_instruction_account(transaction_context, 2)?; + let from_account = instruction_context.try_borrow_instruction_account(0)?; + let dup_account = instruction_context.try_borrow_instruction_account(2)?; if from_account.get_lamports() != dup_account.get_lamports() { return Err(InstructionError::InvalidArgument); } @@ -379,10 +374,10 @@ mod tests { } MockSystemInstruction::MultiBorrowMut => { let lamports_a = instruction_context - .try_borrow_instruction_account(transaction_context, 0)? + .try_borrow_instruction_account(0)? .get_lamports(); let lamports_b = instruction_context - .try_borrow_instruction_account(transaction_context, 2)? 
+ .try_borrow_instruction_account(2)? .get_lamports(); if lamports_a != lamports_b { return Err(InstructionError::InvalidArgument); @@ -390,14 +385,14 @@ mod tests { Ok(()) } MockSystemInstruction::DoWork { lamports, data } => { - let mut dup_account = instruction_context - .try_borrow_instruction_account(transaction_context, 2)?; + let mut dup_account = + instruction_context.try_borrow_instruction_account(2)?; dup_account.checked_sub_lamports(lamports)?; to_account.checked_add_lamports(lamports)?; - dup_account.set_data(vec![data])?; + dup_account.set_data_from_slice(&[data])?; drop(dup_account); - let mut from_account = instruction_context - .try_borrow_instruction_account(transaction_context, 0)?; + let mut from_account = + instruction_context.try_borrow_instruction_account(0)?; from_account.checked_sub_lamports(lamports)?; to_account.checked_add_lamports(lamports)?; Ok(()) @@ -423,7 +418,7 @@ mod tests { ), ]; let mut transaction_context = TransactionContext::new(accounts, Rent::default(), 1, 3); - let program_indices = vec![vec![2]]; + let program_indices = vec![2]; let mut program_cache_for_tx_batch = ProgramCacheForTxBatch::default(); program_cache_for_tx_batch.replenish( mock_program_id, @@ -695,7 +690,7 @@ mod tests { ); let result = process_message( &message, - &[vec![1], vec![2], vec![3], vec![4]], + &[1, 2, 3, 4], &mut invoke_context, &mut ExecuteTimings::default(), &mut 0, diff --git a/svm/src/program_loader.rs b/svm/src/program_loader.rs index 4d79cb235932d4..efc65e38a3666d 100644 --- a/svm/src/program_loader.rs +++ b/svm/src/program_loader.rs @@ -11,9 +11,9 @@ use { solana_pubkey::Pubkey, solana_sdk_ids::{bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, loader_v4}, solana_svm_callback::TransactionProcessingCallback, - solana_timings::ExecuteTimings, + solana_svm_timings::ExecuteTimings, + solana_svm_type_overrides::sync::Arc, solana_transaction_error::{TransactionError, TransactionResult}, - solana_type_overrides::sync::Arc, }; 
#[derive(Debug)] @@ -64,7 +64,7 @@ pub(crate) fn load_program_accounts( callbacks: &CB, pubkey: &Pubkey, ) -> Option { - let program_account = callbacks.get_account_shared_data(pubkey)?; + let (program_account, _slot) = callbacks.get_account_shared_data(pubkey)?; if loader_v4::check_id(program_account.owner()) { return Some( @@ -92,7 +92,9 @@ pub(crate) fn load_program_accounts( programdata_address, }) = program_account.state() { - if let Some(programdata_account) = callbacks.get_account_shared_data(&programdata_address) { + if let Some((programdata_account, _slot)) = + callbacks.get_account_shared_data(&programdata_address) + { if let Ok(UpgradeableLoaderState::ProgramData { slot, upgrade_authority_address: _, @@ -217,7 +219,7 @@ pub(crate) fn get_program_modification_slot( callbacks: &CB, pubkey: &Pubkey, ) -> TransactionResult { - let program = callbacks + let (program, _slot) = callbacks .get_account_shared_data(pubkey) .ok_or(TransactionError::ProgramAccountNotFound)?; if bpf_loader_upgradeable::check_id(program.owner()) { @@ -225,7 +227,7 @@ pub(crate) fn get_program_modification_slot( programdata_address, }) = program.state() { - let programdata = callbacks + let (programdata, _slot) = callbacks .get_account_shared_data(&programdata_address) .ok_or(TransactionError::ProgramAccountNotFound)?; if let Ok(UpgradeableLoaderState::ProgramData { @@ -283,28 +285,11 @@ mod tests { impl InvokeContextCallback for MockBankCallback {} impl TransactionProcessingCallback for MockBankCallback { - fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { - if let Some(data) = self.account_shared_data.borrow().get(account) { - if data.lamports() == 0 { - None - } else { - owners.iter().position(|entry| data.owner() == entry) - } - } else { - None - } - } - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { - self.account_shared_data.borrow().get(pubkey).cloned() - } - - fn add_builtin_account(&self, name: &str, program_id: &Pubkey) { - 
let mut account_data = AccountSharedData::default(); - account_data.set_data(name.as_bytes().to_vec()); + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> { self.account_shared_data - .borrow_mut() - .insert(*program_id, account_data); + .borrow() + .get(pubkey) + .map(|account| (account.clone(), 0)) } } @@ -821,9 +806,9 @@ mod tests { let upcoming_environments = ProgramRuntimeEnvironments::default(); let current_environments = { - let mut program_cache = batch_processor.program_cache.write().unwrap(); - program_cache.upcoming_environments = Some(upcoming_environments.clone()); - program_cache.environments.clone() + let mut global_program_cache = batch_processor.global_program_cache.write().unwrap(); + global_program_cache.upcoming_environments = Some(upcoming_environments.clone()); + global_program_cache.environments.clone() }; mock_bank .account_shared_data diff --git a/svm/src/rent_calculator.rs b/svm/src/rent_calculator.rs index 3b64ff75a26912..3720eeab64079e 100644 --- a/svm/src/rent_calculator.rs +++ b/svm/src/rent_calculator.rs @@ -4,12 +4,18 @@ use { solana_account::{AccountSharedData, ReadableAccount}, + solana_clock::Epoch, solana_pubkey::Pubkey, solana_rent::Rent, solana_transaction_context::{IndexOfAccount, TransactionContext}, solana_transaction_error::{TransactionError, TransactionResult}, }; +/// When rent is collected from an exempt account, rent_epoch is set to this +/// value. The idea is to have a fixed, consistent value for rent_epoch for all accounts that do not collect rent. +/// This enables us to get rid of the field completely. +pub const RENT_EXEMPT_RENT_EPOCH: Epoch = Epoch::MAX; + /// Rent state of a Solana account. 
#[derive(Debug, PartialEq, Eq)] pub enum RentState { @@ -42,10 +48,6 @@ pub fn check_rent_state( transaction_context .get_key_of_account_at_index(index) .expect(expect_msg), - &transaction_context - .accounts() - .try_borrow(index) - .expect(expect_msg), index, )?; } @@ -61,7 +63,6 @@ pub fn check_rent_state_with_account( pre_rent_state: &RentState, post_rent_state: &RentState, address: &Pubkey, - _account_state: &AccountSharedData, account_index: IndexOfAccount, ) -> TransactionResult<()> { if !solana_sdk_ids::incinerator::check_id(address) diff --git a/svm/src/rollback_accounts.rs b/svm/src/rollback_accounts.rs index 2fb2ca3837de15..b55f522caa1f44 100644 --- a/svm/src/rollback_accounts.rs +++ b/svm/src/rollback_accounts.rs @@ -3,7 +3,7 @@ use { solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, solana_clock::Epoch, solana_pubkey::Pubkey, - solana_transaction_context::TransactionAccount, + solana_transaction_context::transaction_accounts::TransactionAccount, }; /// Captured account state used to rollback account state for nonce and fee @@ -99,6 +99,15 @@ impl RollbackAccounts { } } + /// Return a reference to the fee payer account. + pub fn fee_payer(&self) -> &TransactionAccount { + match self { + Self::FeePayerOnly { fee_payer } => fee_payer, + Self::SameNonceAndFeePayer { nonce } => nonce, + Self::SeparateNonceAndFeePayer { fee_payer, .. 
} => fee_payer, + } + } + /// Number of accounts tracked for rollback pub fn count(&self) -> usize { match self { diff --git a/svm/src/transaction_account_state_info.rs b/svm/src/transaction_account_state_info.rs index 03caffb8f7410f..751d63956ada12 100644 --- a/svm/src/transaction_account_state_info.rs +++ b/svm/src/transaction_account_state_info.rs @@ -70,7 +70,6 @@ impl TransactionAccountStateInfo { mod test { use { super::*, - agave_reserved_account_keys::ReservedAccountKeys, solana_account::AccountSharedData, solana_hash::Hash, solana_keypair::Keypair, @@ -82,6 +81,7 @@ mod test { solana_signer::Signer, solana_transaction_context::TransactionContext, solana_transaction_error::TransactionError, + std::collections::HashSet, }; #[test] @@ -110,10 +110,8 @@ mod test { recent_blockhash: Hash::default(), }; - let sanitized_message = SanitizedMessage::Legacy(LegacyMessage::new( - message, - &ReservedAccountKeys::empty_key_set(), - )); + let sanitized_message = + SanitizedMessage::Legacy(LegacyMessage::new(message, &HashSet::new())); let transaction_accounts = vec![ (key1.pubkey(), AccountSharedData::default()), @@ -164,10 +162,8 @@ mod test { recent_blockhash: Hash::default(), }; - let sanitized_message = SanitizedMessage::Legacy(LegacyMessage::new( - message, - &ReservedAccountKeys::empty_key_set(), - )); + let sanitized_message = + SanitizedMessage::Legacy(LegacyMessage::new(message, &HashSet::new())); let transaction_accounts = vec![ (key1.pubkey(), AccountSharedData::default()), diff --git a/svm/src/transaction_balances.rs b/svm/src/transaction_balances.rs index aa2f1df87e6fac..f767175f1ab3ab 100644 --- a/svm/src/transaction_balances.rs +++ b/svm/src/transaction_balances.rs @@ -163,7 +163,7 @@ impl BalanceCollectionRoutines for Option { // this contains all the information we can provide to construct TransactionTokenBalance // that type, in ledger, depends on UiTokenAmount from account-decoder, so we cannot build it here -#[derive(Debug, Clone)] +#[derive(Debug, 
Clone, PartialEq)] pub struct SvmTokenInfo { pub account_index: u8, pub mint: Pubkey, diff --git a/svm/src/transaction_commit_result.rs b/svm/src/transaction_commit_result.rs index cf7dee4334017b..9116f4b9b8522d 100644 --- a/svm/src/transaction_commit_result.rs +++ b/svm/src/transaction_commit_result.rs @@ -16,6 +16,7 @@ pub struct CommittedTransaction { pub executed_units: u64, pub fee_details: FeeDetails, pub loaded_account_stats: TransactionLoadedAccountsStats, + pub fee_payer_post_balance: u64, } pub trait TransactionCommitResultExtensions { diff --git a/svm/src/transaction_execution_result.rs b/svm/src/transaction_execution_result.rs index ab94eda8331f5a..6a066b49df5a8c 100644 --- a/svm/src/transaction_execution_result.rs +++ b/svm/src/transaction_execution_result.rs @@ -1,11 +1,6 @@ -// Re-exported since these have moved to `solana_message`. -#[deprecated( - since = "1.18.0", - note = "Please use `solana_message::inner_instruction` types instead" -)] -pub use solana_message::inner_instruction::{InnerInstruction, InnerInstructionsList}; use { crate::account_loader::LoadedTransaction, + solana_message::inner_instruction::InnerInstructionsList, solana_program_runtime::loaded_programs::ProgramCacheEntry, solana_pubkey::Pubkey, solana_transaction_context::TransactionReturnData, diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 4a98648858bfbb..5ffda9cf01908e 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -24,8 +24,6 @@ use { solana_clock::{Epoch, Slot}, solana_hash::Hash, solana_instruction::TRANSACTION_LEVEL_STACK_HEIGHT, - solana_log_collector::LogCollector, - solana_measure::{measure::Measure, measure_us}, solana_message::{ compiled_instruction::CompiledInstruction, inner_instruction::{InnerInstruction, InnerInstructionsList}, @@ -46,16 +44,18 @@ use { }, solana_pubkey::Pubkey, solana_rent::Rent, - solana_sdk_ids::{native_loader, system_program}, + solana_sdk_ids::system_program, 
solana_svm_callback::TransactionProcessingCallback, solana_svm_feature_set::SVMFeatureSet, + solana_svm_log_collector::LogCollector, + solana_svm_measure::{measure::Measure, measure_us}, + solana_svm_timings::{ExecuteTimingType, ExecuteTimings}, solana_svm_transaction::{svm_message::SVMMessage, svm_transaction::SVMTransaction}, - solana_timings::{ExecuteTimingType, ExecuteTimings}, + solana_svm_type_overrides::sync::{atomic::Ordering, Arc, RwLock, RwLockReadGuard}, solana_transaction_context::{ExecutionRecord, TransactionContext}, solana_transaction_error::{TransactionError, TransactionResult}, - solana_type_overrides::sync::{atomic::Ordering, Arc, RwLock, RwLockReadGuard}, std::{ - collections::{hash_map::Entry, HashMap, HashSet}, + collections::HashSet, fmt::{Debug, Formatter}, rc::Rc, sync::Weak, @@ -157,7 +157,7 @@ pub struct TransactionBatchProcessor { sysvar_cache: RwLock, /// Programs required for transaction batch processing - pub program_cache: Arc>>, + pub global_program_cache: Arc>>, /// Builtin program ids pub builtin_program_ids: RwLock>, @@ -171,7 +171,7 @@ impl Debug for TransactionBatchProcessor { .field("slot", &self.slot) .field("epoch", &self.epoch) .field("sysvar_cache", &self.sysvar_cache) - .field("program_cache", &self.program_cache) + .field("global_program_cache", &self.global_program_cache) .finish() } } @@ -182,7 +182,7 @@ impl Default for TransactionBatchProcessor { slot: Slot::default(), epoch: Epoch::default(), sysvar_cache: RwLock::::default(), - program_cache: Arc::new(RwLock::new(ProgramCache::new( + global_program_cache: Arc::new(RwLock::new(ProgramCache::new( Slot::default(), Epoch::default(), ))), @@ -206,7 +206,7 @@ impl TransactionBatchProcessor { Self { slot, epoch, - program_cache: Arc::new(RwLock::new(ProgramCache::new(slot, epoch))), + global_program_cache: Arc::new(RwLock::new(ProgramCache::new(slot, epoch))), ..Self::default() } } @@ -228,10 +228,10 @@ impl TransactionBatchProcessor { ) -> Self { let processor = 
Self::new_uninitialized(slot, epoch); { - let mut program_cache = processor.program_cache.write().unwrap(); - program_cache.set_fork_graph(fork_graph); + let mut global_program_cache = processor.global_program_cache.write().unwrap(); + global_program_cache.set_fork_graph(fork_graph); processor.configure_program_runtime_environments_inner( - &mut program_cache, + &mut global_program_cache, program_runtime_environment_v1, program_runtime_environment_v2, ); @@ -250,7 +250,7 @@ impl TransactionBatchProcessor { slot, epoch, sysvar_cache: RwLock::::default(), - program_cache: self.program_cache.clone(), + global_program_cache: self.global_program_cache.clone(), builtin_program_ids: RwLock::new(self.builtin_program_ids.read().unwrap().clone()), execution_cost: self.execution_cost, } @@ -264,17 +264,17 @@ impl TransactionBatchProcessor { fn configure_program_runtime_environments_inner( &self, - program_cache: &mut ProgramCache, + global_program_cache: &mut ProgramCache, program_runtime_environment_v1: Option, program_runtime_environment_v2: Option, ) { let empty_loader = || Arc::new(BuiltinProgram::new_loader(VmConfig::default())); - program_cache.latest_root_slot = self.slot; - program_cache.latest_root_epoch = self.epoch; - program_cache.environments.program_runtime_v1 = + global_program_cache.latest_root_slot = self.slot; + global_program_cache.latest_root_epoch = self.epoch; + global_program_cache.environments.program_runtime_v1 = program_runtime_environment_v1.unwrap_or(empty_loader()); - program_cache.environments.program_runtime_v2 = + global_program_cache.environments.program_runtime_v2 = program_runtime_environment_v2.unwrap_or(empty_loader()); } @@ -286,7 +286,7 @@ impl TransactionBatchProcessor { program_runtime_environment_v2: Option, ) { self.configure_program_runtime_environments_inner( - &mut self.program_cache.write().unwrap(), + &mut self.global_program_cache.write().unwrap(), program_runtime_environment_v1, program_runtime_environment_v2, ); @@ -299,7 
+299,7 @@ impl TransactionBatchProcessor { &self, epoch: Epoch, ) -> Option { - self.program_cache + self.global_program_cache .try_read() .ok() .map(|cache| cache.get_environments_for_epoch(epoch)) @@ -333,49 +333,6 @@ impl TransactionBatchProcessor { let mut execute_timings = ExecuteTimings::default(); let mut processing_results = Vec::with_capacity(sanitized_txs.len()); - let native_loader = native_loader::id(); - let (program_accounts_map, filter_executable_us) = measure_us!({ - let mut program_accounts_map = Self::filter_executable_program_accounts( - callbacks, - sanitized_txs, - &check_results, - PROGRAM_OWNERS, - ); - for builtin_program in self.builtin_program_ids.read().unwrap().iter() { - program_accounts_map.insert(*builtin_program, (&native_loader, 0)); - } - program_accounts_map - }); - execute_timings - .saturating_add_in_place(ExecuteTimingType::FilterExecutableUs, filter_executable_us); - - let (mut program_cache_for_tx_batch, program_cache_us) = measure_us!({ - let program_cache_for_tx_batch = self.replenish_program_cache( - callbacks, - &program_accounts_map, - &mut execute_timings, - config.check_program_modification_slot, - config.limit_to_load_programs, - ); - - if program_cache_for_tx_batch.hit_max_limit { - return LoadAndExecuteSanitizedTransactionsOutput { - error_metrics, - execute_timings, - processing_results: (0..sanitized_txs.len()) - .map(|_| Err(TransactionError::ProgramCacheHitMaxLimit)) - .collect(), - // If we abort the batch and balance recording is enabled, no balances should be - // collected. If this is a leader thread, no batch will be committed. - balance_collector: None, - }; - } - - program_cache_for_tx_batch - }); - execute_timings - .saturating_add_in_place(ExecuteTimingType::ProgramCacheUs, program_cache_us); - // Determine a capacity for the internal account cache. This // over-allocates but avoids ever reallocating, and spares us from // deduplicating the account keys lists. 
@@ -395,6 +352,48 @@ impl TransactionBatchProcessor { .enable_transaction_balance_recording .then(|| BalanceCollector::new_with_transaction_count(sanitized_txs.len())); + // Create the batch-local program cache. + let mut program_cache_for_tx_batch = { + let global_program_cache = self.global_program_cache.read().unwrap(); + let mut program_cache_for_tx_batch = ProgramCacheForTxBatch::new_from_cache( + self.slot, + self.epoch, + &global_program_cache, + ); + drop(global_program_cache); + + let builtins = self.builtin_program_ids.read().unwrap().clone(); + + let ((), program_cache_us) = measure_us!({ + self.replenish_program_cache( + &account_loader, + &builtins, + &mut program_cache_for_tx_batch, + &mut execute_timings, + config.check_program_modification_slot, + config.limit_to_load_programs, + false, // increment_usage_counter + ); + }); + execute_timings + .saturating_add_in_place(ExecuteTimingType::ProgramCacheUs, program_cache_us); + + program_cache_for_tx_batch + }; + + if program_cache_for_tx_batch.hit_max_limit { + return LoadAndExecuteSanitizedTransactionsOutput { + error_metrics, + execute_timings, + processing_results: (0..sanitized_txs.len()) + .map(|_| Err(TransactionError::ProgramCacheHitMaxLimit)) + .collect(), + // If we abort the batch and balance recording is enabled, no balances should be + // collected. If this is a leader thread, no batch will be committed. + balance_collector: None, + }; + } + let (mut load_us, mut execution_us): (u64, u64) = (0, 0); // Validate, execute, and collect results from each transaction in order. 
@@ -439,6 +438,46 @@ impl TransactionBatchProcessor { Ok(ProcessedTransaction::FeesOnly(Box::new(fees_only_tx))) } TransactionLoadResult::Loaded(loaded_transaction) => { + let (program_accounts_set, filter_executable_us) = measure_us!(self + .filter_executable_program_accounts( + &account_loader, + &mut program_cache_for_tx_batch, + tx, + )); + execute_timings.saturating_add_in_place( + ExecuteTimingType::FilterExecutableUs, + filter_executable_us, + ); + + let ((), program_cache_us) = measure_us!({ + self.replenish_program_cache( + &account_loader, + &program_accounts_set, + &mut program_cache_for_tx_batch, + &mut execute_timings, + config.check_program_modification_slot, + config.limit_to_load_programs, + true, // increment_usage_counter + ); + }); + execute_timings.saturating_add_in_place( + ExecuteTimingType::ProgramCacheUs, + program_cache_us, + ); + + if program_cache_for_tx_batch.hit_max_limit { + return LoadAndExecuteSanitizedTransactionsOutput { + error_metrics, + execute_timings, + processing_results: (0..sanitized_txs.len()) + .map(|_| Err(TransactionError::ProgramCacheHitMaxLimit)) + .collect(), + // If we abort the batch and balance recording is enabled, no balances should be + // collected. If this is a leader thread, no batch will be committed. + balance_collector: None, + }; + } + let executed_tx = self.execute_loaded_transaction( callbacks, tx, @@ -454,6 +493,7 @@ impl TransactionBatchProcessor { // Also update local program cache with modifications made by the transaction, // if it executed successfully. account_loader.update_accounts_for_executed_tx(tx, &executed_tx); + if executed_tx.was_successful() { program_cache_for_tx_batch.merge(&executed_tx.programs_modified_by_tx); } @@ -477,7 +517,7 @@ impl TransactionBatchProcessor { // occurrences of cooperative loading. 
if program_cache_for_tx_batch.loaded_missing || program_cache_for_tx_batch.merged_modified { const SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE: u8 = 90; - self.program_cache + self.global_program_cache .write() .unwrap() .evict_using_2s_random_selection( @@ -663,122 +703,105 @@ impl TransactionBatchProcessor { } } - /// Returns a map from executable program accounts (all accounts owned by any loader) - /// to their usage counters, for the transactions with a valid blockhash or nonce. - fn filter_executable_program_accounts<'a, CB: TransactionProcessingCallback>( - callbacks: &CB, - txs: &[impl SVMMessage], - check_results: &[TransactionCheckResult], - program_owners: &'a [Pubkey], - ) -> HashMap { - let mut result: HashMap = HashMap::new(); - check_results.iter().zip(txs).for_each(|etx| { - if let (Ok(_), tx) = etx { - tx.account_keys() - .iter() - .for_each(|key| match result.entry(*key) { - Entry::Occupied(mut entry) => { - let (_, count) = entry.get_mut(); - *count = count.saturating_add(1); - } - Entry::Vacant(entry) => { - if let Some(index) = - callbacks.account_matches_owners(key, program_owners) - { - if let Some(owner) = program_owners.get(index) { - entry.insert((owner, 1)); - } - } - } - }); + /// Appends to a set of executable program accounts (all accounts owned by any loader) + /// for transactions with a valid blockhash or nonce. 
+ fn filter_executable_program_accounts( + &self, + account_loader: &AccountLoader, + program_cache_for_tx_batch: &mut ProgramCacheForTxBatch, + tx: &impl SVMMessage, + ) -> HashSet { + let mut program_accounts_set = HashSet::default(); + for account_key in tx.account_keys().iter() { + if let Some(cache_entry) = program_cache_for_tx_batch.find(account_key) { + cache_entry.tx_usage_counter.fetch_add(1, Ordering::Relaxed); + } else if account_loader + .get_account_shared_data(account_key) + .map(|(account, _slot)| PROGRAM_OWNERS.contains(account.owner())) + .unwrap_or(false) + { + program_accounts_set.insert(*account_key); } - }); - result + } + program_accounts_set } #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] fn replenish_program_cache( &self, - callback: &CB, - program_accounts_map: &HashMap, + account_loader: &AccountLoader, + program_accounts_set: &HashSet, + program_cache_for_tx_batch: &mut ProgramCacheForTxBatch, execute_timings: &mut ExecuteTimings, check_program_modification_slot: bool, limit_to_load_programs: bool, - ) -> ProgramCacheForTxBatch { - let mut missing_programs: Vec<(Pubkey, (ProgramCacheMatchCriteria, u64))> = - program_accounts_map - .iter() - .map(|(pubkey, (_, count))| { - let match_criteria = if check_program_modification_slot { - get_program_modification_slot(callback, pubkey) - .map_or(ProgramCacheMatchCriteria::Tombstone, |slot| { - ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(slot) - }) - } else { - ProgramCacheMatchCriteria::NoCriteria - }; - (*pubkey, (match_criteria, *count)) - }) - .collect(); + increment_usage_counter: bool, + ) { + let mut missing_programs: Vec<(Pubkey, ProgramCacheMatchCriteria)> = program_accounts_set + .iter() + .map(|pubkey| { + let match_criteria = if check_program_modification_slot { + get_program_modification_slot(account_loader, pubkey) + .map_or(ProgramCacheMatchCriteria::Tombstone, |slot| { + ProgramCacheMatchCriteria::DeployedOnOrAfterSlot(slot) + }) + } else { + 
ProgramCacheMatchCriteria::NoCriteria + }; + (*pubkey, match_criteria) + }) + .collect(); - let mut loaded_programs_for_txs: Option = None; + let mut count_hits_and_misses = true; loop { let (program_to_store, task_cookie, task_waiter) = { // Lock the global cache. - let program_cache = self.program_cache.read().unwrap(); - // Initialize our local cache. - let is_first_round = loaded_programs_for_txs.is_none(); - if is_first_round { - loaded_programs_for_txs = Some(ProgramCacheForTxBatch::new_from_cache( - self.slot, - self.epoch, - &program_cache, - )); - } + let global_program_cache = self.global_program_cache.read().unwrap(); // Figure out which program needs to be loaded next. - let program_to_load = program_cache.extract( + let program_to_load = global_program_cache.extract( &mut missing_programs, - loaded_programs_for_txs.as_mut().unwrap(), - is_first_round, + program_cache_for_tx_batch, + increment_usage_counter, + count_hits_and_misses, ); + count_hits_and_misses = false; - let program_to_store = program_to_load.map(|(key, count)| { + let program_to_store = program_to_load.map(|key| { // Load, verify and compile one program. let program = load_program_with_pubkey( - callback, - &program_cache.get_environments_for_epoch(self.epoch), + account_loader, + &global_program_cache.get_environments_for_epoch(self.epoch), &key, self.slot, execute_timings, false, ) .expect("called load_program_with_pubkey() with nonexistent account"); - program.tx_usage_counter.store(count, Ordering::Relaxed); (key, program) }); - let task_waiter = Arc::clone(&program_cache.loading_task_waiter); + let task_waiter = Arc::clone(&global_program_cache.loading_task_waiter); (program_to_store, task_waiter.cookie(), task_waiter) // Unlock the global cache again. 
}; if let Some((key, program)) = program_to_store { - loaded_programs_for_txs.as_mut().unwrap().loaded_missing = true; - let mut program_cache = self.program_cache.write().unwrap(); + program_cache_for_tx_batch.loaded_missing = true; + let mut global_program_cache = self.global_program_cache.write().unwrap(); // Submit our last completed loading task. - if program_cache.finish_cooperative_loading_task(self.slot, key, program) + if global_program_cache.finish_cooperative_loading_task(self.slot, key, program) && limit_to_load_programs { // This branch is taken when there is an error in assigning a program to a // cache slot. It is not possible to mock this error for SVM unit // tests purposes. - let mut ret = ProgramCacheForTxBatch::new_from_cache( + *program_cache_for_tx_batch = ProgramCacheForTxBatch::new_from_cache( self.slot, self.epoch, - &program_cache, + &global_program_cache, ); - ret.hit_max_limit = true; - return ret; + program_cache_for_tx_batch.hit_max_limit = true; + return; } } else if missing_programs.is_empty() { break; @@ -789,8 +812,6 @@ impl TransactionBatchProcessor { let _new_cookie = task_waiter.wait(task_cookie); } } - - loaded_programs_for_txs.unwrap() } /// Execute a transaction using the provided loaded accounts and update @@ -833,11 +854,6 @@ impl TransactionBatchProcessor { compute_budget.max_instruction_stack_depth, compute_budget.max_instruction_trace_length, ); - transaction_context.set_remove_accounts_executable_flag_checks( - environment - .feature_set - .remove_accounts_executable_flag_checks, - ); let pre_account_state_info = TransactionAccountStateInfo::new(&transaction_context, tx, &environment.rent); @@ -990,11 +1006,7 @@ impl TransactionBatchProcessor { let stack_height = u8::try_from(stack_height).unwrap_or(u8::MAX); let instruction = CompiledInstruction::new_from_raw_parts( instruction_context - .get_index_of_program_account_in_transaction( - instruction_context - .get_number_of_program_accounts() - .saturating_sub(1), - ) + 
.get_index_of_program_account_in_transaction() .unwrap_or_default() as u8, instruction_context.get_instruction_data().to_vec(), (0..instruction_context.get_number_of_instruction_accounts()) @@ -1027,7 +1039,7 @@ impl TransactionBatchProcessor { ) { let mut sysvar_cache = self.sysvar_cache.write().unwrap(); sysvar_cache.fill_missing_entries(|pubkey, set_sysvar| { - if let Some(account) = callbacks.get_account_shared_data(pubkey) { + if let Some((account, _slot)) = callbacks.get_account_shared_data(pubkey) { set_sysvar(account.data()); } }); @@ -1043,21 +1055,12 @@ impl TransactionBatchProcessor { } /// Add a built-in program - pub fn add_builtin( - &self, - callbacks: &CB, - program_id: Pubkey, - name: &str, - builtin: ProgramCacheEntry, - ) { - debug!("Adding program {name} under {program_id:?}"); - callbacks.add_builtin_account(name, &program_id); + pub fn add_builtin(&self, program_id: Pubkey, builtin: ProgramCacheEntry) { self.builtin_program_ids.write().unwrap().insert(program_id); - self.program_cache + self.global_program_cache .write() .unwrap() .assign_program(program_id, Arc::new(builtin)); - debug!("Added program {name} under {program_id:?}"); } #[cfg(feature = "dev-context-only-utils")] @@ -1079,9 +1082,9 @@ mod tests { TRANSACTION_ACCOUNT_BASE_SIZE, }, nonce_info::NonceInfo, + rent_calculator::RENT_EXEMPT_RENT_EPOCH, rollback_accounts::RollbackAccounts, }, - agave_reserved_account_keys::ReservedAccountKeys, solana_account::{create_account_shared_data_for_test, WritableAccount}, solana_clock::Clock, solana_compute_budget_interface::ComputeBudgetInstruction, @@ -1099,21 +1102,18 @@ mod tests { loaded_programs::{BlockRelation, ProgramCacheEntryType}, }, solana_rent::Rent, - solana_rent_collector::RENT_EXEMPT_RENT_EPOCH, - solana_sdk_ids::{bpf_loader, system_program, sysvar}, + solana_sdk_ids::{bpf_loader, loader_v4, system_program, sysvar}, solana_signature::Signature, solana_svm_callback::{AccountState, InvokeContextCallback}, 
solana_transaction::{sanitized::SanitizedTransaction, Transaction}, solana_transaction_context::TransactionContext, solana_transaction_error::{TransactionError, TransactionError::DuplicateInstruction}, + std::collections::HashMap, test_case::test_case, }; fn new_unchecked_sanitized_message(message: Message) -> SanitizedMessage { - SanitizedMessage::Legacy(LegacyMessage::new( - message, - &ReservedAccountKeys::empty_key_set(), - )) + SanitizedMessage::Legacy(LegacyMessage::new(message, &HashSet::new())) } struct TestForkGraph {} @@ -1146,33 +1146,12 @@ mod tests { impl InvokeContextCallback for MockBankCallback {} impl TransactionProcessingCallback for MockBankCallback { - fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { - if let Some(data) = self.account_shared_data.read().unwrap().get(account) { - if data.lamports() == 0 { - None - } else { - owners.iter().position(|entry| data.owner() == entry) - } - } else { - None - } - } - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> { self.account_shared_data .read() .unwrap() .get(pubkey) - .cloned() - } - - fn add_builtin_account(&self, name: &str, program_id: &Pubkey) { - let mut account_data = AccountSharedData::default(); - account_data.set_data(name.as_bytes().to_vec()); - self.account_shared_data - .write() - .unwrap() - .insert(*program_id, account_data); + .map(|account| (account.clone(), 0)) } fn inspect_account( @@ -1269,17 +1248,23 @@ mod tests { #[test] fn test_inner_instructions_list_from_instruction_trace() { let instruction_trace = [1, 2, 1, 1, 2, 3, 2]; - let mut transaction_context = - TransactionContext::new(vec![], Rent::default(), 3, instruction_trace.len()); + let mut transaction_context = TransactionContext::new( + vec![( + Pubkey::new_unique(), + AccountSharedData::new(1, 1, &bpf_loader::ID), + )], + Rent::default(), + 3, + instruction_trace.len(), + ); for 
(index_in_trace, stack_height) in instruction_trace.into_iter().enumerate() { - while stack_height <= transaction_context.get_instruction_context_stack_height() { + while stack_height <= transaction_context.get_instruction_stack_height() { transaction_context.pop().unwrap(); } - if stack_height > transaction_context.get_instruction_context_stack_height() { + if stack_height > transaction_context.get_instruction_stack_height() { transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![], vec![], &[index_in_trace as u8]); + .configure_next_instruction_for_tests(0, vec![], &[index_in_trace as u8]) + .unwrap(); transaction_context.push().unwrap(); } } @@ -1342,7 +1327,7 @@ mod tests { let loaded_transaction = LoadedTransaction { accounts: vec![(Pubkey::new_unique(), AccountSharedData::default())], - program_indices: vec![vec![0]], + program_indices: vec![0], fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::default(), compute_budget: SVMTransactionExecutionBudget::default(), @@ -1437,7 +1422,7 @@ mod tests { (key1, AccountSharedData::default()), (key2, AccountSharedData::default()), ], - program_indices: vec![vec![0]], + program_indices: vec![0], fee_details: FeeDetails::default(), rollback_accounts: RollbackAccounts::default(), compute_budget: SVMTransactionExecutionBudget::default(), @@ -1469,21 +1454,32 @@ mod tests { #[should_panic = "called load_program_with_pubkey() with nonexistent account"] fn test_replenish_program_cache_with_nonexistent_accounts() { let mock_bank = MockBankCallback::default(); + let account_loader = (&mock_bank).into(); let fork_graph = Arc::new(RwLock::new(TestForkGraph {})); let batch_processor = TransactionBatchProcessor::new(0, 0, Arc::downgrade(&fork_graph), None, None); let key = Pubkey::new_unique(); - let owner = Pubkey::new_unique(); - let mut account_maps: HashMap = HashMap::new(); - account_maps.insert(key, (&owner, 4)); + let mut account_set = HashSet::new(); + 
account_set.insert(key); + + let mut program_cache_for_tx_batch = { + let global_program_cache = batch_processor.global_program_cache.read().unwrap(); + ProgramCacheForTxBatch::new_from_cache( + batch_processor.slot, + batch_processor.epoch, + &global_program_cache, + ) + }; batch_processor.replenish_program_cache( - &mock_bank, - &account_maps, + &account_loader, + &account_set, + &mut program_cache_for_tx_batch, &mut ExecuteTimings::default(), false, true, + true, ); } @@ -1494,7 +1490,6 @@ mod tests { let batch_processor = TransactionBatchProcessor::new(0, 0, Arc::downgrade(&fork_graph), None, None); let key = Pubkey::new_unique(); - let owner = Pubkey::new_unique(); let mut account_data = AccountSharedData::default(); account_data.set_owner(bpf_loader::id()); @@ -1503,25 +1498,37 @@ mod tests { .write() .unwrap() .insert(key, account_data); + let account_loader = (&mock_bank).into(); - let mut account_maps: HashMap = HashMap::new(); - account_maps.insert(key, (&owner, 4)); + let mut account_set = HashSet::new(); + account_set.insert(key); let mut loaded_missing = 0; for limit_to_load_programs in [false, true] { - let result = batch_processor.replenish_program_cache( - &mock_bank, - &account_maps, + let mut program_cache_for_tx_batch = { + let global_program_cache = batch_processor.global_program_cache.read().unwrap(); + ProgramCacheForTxBatch::new_from_cache( + batch_processor.slot, + batch_processor.epoch, + &global_program_cache, + ) + }; + + batch_processor.replenish_program_cache( + &account_loader, + &account_set, + &mut program_cache_for_tx_batch, &mut ExecuteTimings::default(), false, limit_to_load_programs, + true, ); - assert!(!result.hit_max_limit); - if result.loaded_missing { + assert!(!program_cache_for_tx_batch.hit_max_limit); + if program_cache_for_tx_batch.loaded_missing { loaded_missing += 1; } - let program = result.find(&key).unwrap(); + let program = program_cache_for_tx_batch.find(&key).unwrap(); assert!(matches!( program.program, 
ProgramCacheEntryType::FailedVerification(_) @@ -1534,47 +1541,28 @@ mod tests { fn test_filter_executable_program_accounts() { let mock_bank = MockBankCallback::default(); let key1 = Pubkey::new_unique(); - let owner1 = Pubkey::new_unique(); + let owner1 = bpf_loader::id(); + let key2 = Pubkey::new_unique(); + let owner2 = loader_v4::id(); - let mut data = AccountSharedData::default(); - data.set_owner(owner1); - data.set_lamports(93); + let mut data1 = AccountSharedData::default(); + data1.set_owner(owner1); + data1.set_lamports(93); mock_bank .account_shared_data .write() .unwrap() - .insert(key1, data); - - let message = Message { - account_keys: vec![key1], - header: MessageHeader::default(), - instructions: vec![CompiledInstruction { - program_id_index: 0, - accounts: vec![], - data: vec![], - }], - recent_blockhash: Hash::default(), - }; - - let sanitized_message = new_unchecked_sanitized_message(message); - - let sanitized_transaction_1 = SanitizedTransaction::new_for_tests( - sanitized_message, - vec![Signature::new_unique()], - false, - ); + .insert(key1, data1); - let key2 = Pubkey::new_unique(); - let owner2 = Pubkey::new_unique(); - - let mut account_data = AccountSharedData::default(); - account_data.set_owner(owner2); - account_data.set_lamports(90); + let mut data2 = AccountSharedData::default(); + data2.set_owner(owner2); + data2.set_lamports(90); mock_bank .account_shared_data .write() .unwrap() - .insert(key2, account_data); + .insert(key2, data2); + let account_loader = (&mock_bank).into(); let message = Message { account_keys: vec![key1, key2], @@ -1589,34 +1577,22 @@ mod tests { let sanitized_message = new_unchecked_sanitized_message(message); - let sanitized_transaction_2 = SanitizedTransaction::new_for_tests( + let sanitized_transaction = SanitizedTransaction::new_for_tests( sanitized_message, vec![Signature::new_unique()], false, ); - let transactions = vec![ - sanitized_transaction_1.clone(), - sanitized_transaction_2.clone(), - 
sanitized_transaction_1, - ]; - let check_results = vec![ - Ok(CheckedTransactionDetails::default()), - Ok(CheckedTransactionDetails::default()), - Err(TransactionError::ProgramAccountNotFound), - ]; - let owners = vec![owner1, owner2]; - - let result = TransactionBatchProcessor::::filter_executable_program_accounts( - &mock_bank, - &transactions, - &check_results, - &owners, + let batch_processor = TransactionBatchProcessor::::default(); + let program_accounts_set = batch_processor.filter_executable_program_accounts( + &account_loader, + &mut ProgramCacheForTxBatch::default(), + &sanitized_transaction, ); - assert_eq!(result.len(), 2); - assert_eq!(result[&key1], (&owner1, 2)); - assert_eq!(result[&key2], (&owner2, 1)); + assert_eq!(program_accounts_set.len(), 2); + assert!(program_accounts_set.contains(&key1)); + assert!(program_accounts_set.contains(&key2)); } #[test] @@ -1626,8 +1602,8 @@ mod tests { let non_program_pubkey1 = Pubkey::new_unique(); let non_program_pubkey2 = Pubkey::new_unique(); - let program1_pubkey = Pubkey::new_unique(); - let program2_pubkey = Pubkey::new_unique(); + let program1_pubkey = bpf_loader::id(); + let program2_pubkey = loader_v4::id(); let account1_pubkey = Pubkey::new_unique(); let account2_pubkey = Pubkey::new_unique(); let account3_pubkey = Pubkey::new_unique(); @@ -1668,6 +1644,7 @@ mod tests { account4_pubkey, AccountSharedData::new(40, 1, &program2_pubkey), ); + let account_loader = (&bank).into(); let tx1 = Transaction::new_with_compiled_instructions( &[&keypair1], @@ -1687,123 +1664,34 @@ mod tests { ); let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - let owners = &[program1_pubkey, program2_pubkey]; - let programs = - TransactionBatchProcessor::::filter_executable_program_accounts( - &bank, - &[sanitized_tx1, sanitized_tx2], - &[ - Ok(CheckedTransactionDetails::default()), - Ok(CheckedTransactionDetails::default()), - ], - owners, - ); + let batch_processor = 
TransactionBatchProcessor::::default(); - // The result should contain only account3_pubkey, and account4_pubkey as the program accounts - assert_eq!(programs.len(), 2); - assert_eq!( - programs - .get(&account3_pubkey) - .expect("failed to find the program account"), - &(&program1_pubkey, 2) - ); - assert_eq!( - programs - .get(&account4_pubkey) - .expect("failed to find the program account"), - &(&program2_pubkey, 1) + let tx1_programs = batch_processor.filter_executable_program_accounts( + &account_loader, + &mut ProgramCacheForTxBatch::default(), + &sanitized_tx1, ); - } - - #[test] - fn test_filter_executable_program_accounts_invalid_blockhash() { - let keypair1 = Keypair::new(); - let keypair2 = Keypair::new(); - let non_program_pubkey1 = Pubkey::new_unique(); - let non_program_pubkey2 = Pubkey::new_unique(); - let program1_pubkey = Pubkey::new_unique(); - let program2_pubkey = Pubkey::new_unique(); - let account1_pubkey = Pubkey::new_unique(); - let account2_pubkey = Pubkey::new_unique(); - let account3_pubkey = Pubkey::new_unique(); - let account4_pubkey = Pubkey::new_unique(); - - let account5_pubkey = Pubkey::new_unique(); - - let bank = MockBankCallback::default(); - bank.account_shared_data.write().unwrap().insert( - non_program_pubkey1, - AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.account_shared_data.write().unwrap().insert( - non_program_pubkey2, - AccountSharedData::new(1, 10, &account5_pubkey), - ); - bank.account_shared_data.write().unwrap().insert( - program1_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.account_shared_data.write().unwrap().insert( - program2_pubkey, - AccountSharedData::new(40, 1, &account5_pubkey), - ); - bank.account_shared_data.write().unwrap().insert( - account1_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey1), - ); - bank.account_shared_data.write().unwrap().insert( - account2_pubkey, - AccountSharedData::new(1, 10, &non_program_pubkey2), - ); - 
bank.account_shared_data.write().unwrap().insert( - account3_pubkey, - AccountSharedData::new(40, 1, &program1_pubkey), - ); - bank.account_shared_data.write().unwrap().insert( - account4_pubkey, - AccountSharedData::new(40, 1, &program2_pubkey), + assert_eq!(tx1_programs.len(), 1); + assert!( + tx1_programs.contains(&account3_pubkey), + "failed to find the program account", ); - let tx1 = Transaction::new_with_compiled_instructions( - &[&keypair1], - &[non_program_pubkey1], - Hash::new_unique(), - vec![account1_pubkey, account2_pubkey, account3_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], + let tx2_programs = batch_processor.filter_executable_program_accounts( + &account_loader, + &mut ProgramCacheForTxBatch::default(), + &sanitized_tx2, ); - let sanitized_tx1 = SanitizedTransaction::from_transaction_for_tests(tx1); - let tx2 = Transaction::new_with_compiled_instructions( - &[&keypair2], - &[non_program_pubkey2], - Hash::new_unique(), - vec![account4_pubkey, account3_pubkey, account2_pubkey], - vec![CompiledInstruction::new(1, &(), vec![0])], + assert_eq!(tx2_programs.len(), 2); + assert!( + tx2_programs.contains(&account3_pubkey), + "failed to find the program account", ); - // Let's not register blockhash from tx2. 
This should cause the tx2 to fail - let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - - let owners = &[program1_pubkey, program2_pubkey]; - let check_results = vec![ - Ok(CheckedTransactionDetails::default()), - Err(TransactionError::BlockhashNotFound), - ]; - let programs = - TransactionBatchProcessor::::filter_executable_program_accounts( - &bank, - &[sanitized_tx1, sanitized_tx2], - &check_results, - owners, - ); - - // The result should contain only account3_pubkey as the program accounts - assert_eq!(programs.len(), 1); - assert_eq!( - programs - .get(&account3_pubkey) - .expect("failed to find the program account"), - &(&program1_pubkey, 1) + assert!( + tx2_programs.contains(&account4_pubkey), + "failed to find the program account", ); } @@ -1978,7 +1866,6 @@ mod tests { #[test] fn test_add_builtin() { - let mock_bank = MockBankCallback::default(); let fork_graph = Arc::new(RwLock::new(TestForkGraph {})); let batch_processor = TransactionBatchProcessor::new(0, 0, Arc::downgrade(&fork_graph), None, None); @@ -1991,23 +1878,23 @@ mod tests { |_invoke_context, _param0, _param1, _param2, _param3, _param4| {}, ); - batch_processor.add_builtin(&mock_bank, key, name, program); - - assert_eq!( - mock_bank.account_shared_data.read().unwrap()[&key].data(), - name.as_bytes() - ); + batch_processor.add_builtin(key, program); let mut loaded_programs_for_tx_batch = ProgramCacheForTxBatch::new_from_cache( 0, 0, - &batch_processor.program_cache.read().unwrap(), - ); - batch_processor.program_cache.write().unwrap().extract( - &mut vec![(key, (ProgramCacheMatchCriteria::NoCriteria, 1))], - &mut loaded_programs_for_tx_batch, - true, + &batch_processor.global_program_cache.read().unwrap(), ); + batch_processor + .global_program_cache + .write() + .unwrap() + .extract( + &mut vec![(key, ProgramCacheMatchCriteria::NoCriteria)], + &mut loaded_programs_for_tx_batch, + true, + true, + ); let entry = loaded_programs_for_tx_batch.find(&key).unwrap(); // 
Repeating code because ProgramCacheEntry does not implement clone. diff --git a/svm/tests/concurrent_tests.rs b/svm/tests/concurrent_tests.rs index af646cf8ac1fd5..619b76c7946c98 100644 --- a/svm/tests/concurrent_tests.rs +++ b/svm/tests/concurrent_tests.rs @@ -1,10 +1,7 @@ #![cfg(feature = "shuttle-test")] use { - crate::{ - mock_bank::{create_custom_loader, deploy_program, register_builtins, MockForkGraph}, - transaction_builder::SanitizedTransactionBuilder, - }, + crate::mock_bank::{create_custom_loader, deploy_program, register_builtins, MockForkGraph}, assert_matches::assert_matches, mock_bank::MockBankCallback, shuttle::{ @@ -12,17 +9,14 @@ use { thread, Runner, }, solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, - solana_hash::Hash, - solana_instruction::AccountMeta, + solana_instruction::{AccountMeta, Instruction}, solana_program_runtime::{ execution_budget::SVMTransactionExecutionAndFeeBudgetLimits, - loaded_programs::ProgramCacheEntryType, + loaded_programs::{ProgramCacheEntryType, ProgramCacheForTxBatch}, }, solana_pubkey::Pubkey, - solana_sdk_ids::bpf_loader_upgradeable, - solana_signature::Signature, solana_svm::{ - account_loader::{CheckedTransactionDetails, TransactionCheckResult}, + account_loader::{AccountLoader, CheckedTransactionDetails, TransactionCheckResult}, transaction_processing_result::{ ProcessedTransaction, TransactionProcessingResultExtensions, }, @@ -31,13 +25,15 @@ use { TransactionProcessingEnvironment, }, }, - solana_timings::ExecuteTimings, - std::collections::HashMap, + solana_svm_feature_set::SVMFeatureSet, + solana_svm_timings::ExecuteTimings, + solana_transaction::{sanitized::SanitizedTransaction, Transaction}, + std::collections::HashSet, }; mod mock_bank; -mod transaction_builder; +const MAX_ITERATIONS: usize = 10_000; fn program_cache_execution(threads: usize) { let mut mock_bank = MockBankCallback::default(); @@ -45,18 +41,13 @@ fn program_cache_execution(threads: usize) { let batch_processor = 
TransactionBatchProcessor::new(5, 5, Arc::downgrade(&fork_graph), None, None); - const LOADER: Pubkey = bpf_loader_upgradeable::id(); let programs = vec![ deploy_program("hello-solana".to_string(), 0, &mut mock_bank), deploy_program("simple-transfer".to_string(), 0, &mut mock_bank), deploy_program("clock-sysvar".to_string(), 0, &mut mock_bank), ]; - let account_maps: HashMap = programs - .iter() - .enumerate() - .map(|(idx, key)| (*key, (&LOADER, idx as u64))) - .collect(); + let account_maps: HashSet = programs.iter().copied().collect(); let ths: Vec<_> = (0..threads) .map(|_| { @@ -69,12 +60,29 @@ fn program_cache_execution(threads: usize) { let maps = account_maps.clone(); let programs = programs.clone(); thread::spawn(move || { - let result = processor.replenish_program_cache( + let feature_set = SVMFeatureSet::all_enabled(); + let account_loader = AccountLoader::new_with_loaded_accounts_capacity( + None, &local_bank, + &feature_set, + 0, + ); + let mut result = { + let global_program_cache = processor.global_program_cache.read().unwrap(); + ProgramCacheForTxBatch::new_from_cache( + processor.slot, + processor.epoch, + &global_program_cache, + ) + }; + processor.replenish_program_cache( + &account_loader, &maps, + &mut result, &mut ExecuteTimings::default(), false, true, + true, ); for key in &programs { let cache_entry = result.find(key); @@ -105,7 +113,7 @@ fn test_program_cache_with_probabilistic_scheduler() { move || { program_cache_execution(4); }, - 300, + MAX_ITERATIONS, 5, ); } @@ -113,7 +121,7 @@ fn test_program_cache_with_probabilistic_scheduler() { // In this case, the scheduler is random and may preempt threads at any point and any time. 
#[test] fn test_program_cache_with_random_scheduler() { - shuttle::check_random(move || program_cache_execution(4), 300); + shuttle::check_random(move || program_cache_execution(4), MAX_ITERATIONS); } // This test explores all the possible thread scheduling patterns that might affect the program @@ -124,7 +132,7 @@ fn test_program_cache_with_exhaustive_scheduler() { // values in a thread. // Since this is not the case for the execution of jitted program, we can still run the test // but with decreased accuracy. - let scheduler = shuttle::scheduler::DfsScheduler::new(Some(500), true); + let scheduler = shuttle::scheduler::DfsScheduler::new(Some(MAX_ITERATIONS), true); let runner = Runner::new(scheduler, Default::default()); runner.run(move || program_cache_execution(4)); } @@ -147,7 +155,6 @@ fn svm_concurrent() { batch_processor.fill_missing_sysvar_cache_entries(&*mock_bank); register_builtins(&mock_bank, &batch_processor, false); - let mut transaction_builder = SanitizedTransactionBuilder::default(); let program_id = deploy_program("transfer-from-account".to_string(), 0, &mock_bank); const THREADS: usize = 4; @@ -192,40 +199,34 @@ fn svm_concurrent() { shared_data.insert(fee_payer, account_data); } - transaction_builder.create_instruction( - program_id, - vec![ - AccountMeta { - pubkey: sender, - is_signer: true, - is_writable: true, - }, - AccountMeta { - pubkey: recipient, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: read_account, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: system_account, - is_signer: false, - is_writable: false, - }, - ], - HashMap::from([(sender, Signature::new_unique())]), - vec![0], - ); + let accounts = vec![ + AccountMeta { + pubkey: sender, + is_signer: true, + is_writable: true, + }, + AccountMeta { + pubkey: recipient, + is_signer: false, + is_writable: true, + }, + AccountMeta { + pubkey: read_account, + is_signer: false, + is_writable: false, + }, + AccountMeta { + pubkey: 
system_account, + is_signer: false, + is_writable: false, + }, + ]; + + let instruction = Instruction::new_with_bytes(program_id, &[0], accounts); + let legacy_transaction = Transaction::new_with_payer(&[instruction], Some(&fee_payer)); - let sanitized_transaction = transaction_builder.build( - Hash::default(), - (fee_payer, Signature::new_unique()), - true, - false, - ); + let sanitized_transaction = + SanitizedTransaction::try_from_legacy_transaction(legacy_transaction, &HashSet::new()); transactions[idx % THREADS].push(sanitized_transaction.unwrap()); check_data[idx % THREADS].push(CheckTxData { fee_payer, @@ -301,7 +302,7 @@ fn test_svm_with_probabilistic_scheduler() { move || { svm_concurrent(); }, - 300, + MAX_ITERATIONS, 5, ); } diff --git a/svm/tests/conformance.rs b/svm/tests/conformance.rs deleted file mode 100644 index 003ebe1d9b3a4b..00000000000000 --- a/svm/tests/conformance.rs +++ /dev/null @@ -1,545 +0,0 @@ -use { - crate::{ - mock_bank::{MockBankCallback, MockForkGraph}, - transaction_builder::SanitizedTransactionBuilder, - }, - agave_feature_set::{FeatureSet, FEATURE_NAMES}, - agave_syscalls::create_program_runtime_environment_v1, - prost::Message, - solana_account::{AccountSharedData, ReadableAccount, WritableAccount}, - solana_clock::Clock, - solana_epoch_schedule::EpochSchedule, - solana_hash::Hash, - solana_instruction::AccountMeta, - solana_log_collector::LogCollector, - solana_message::SanitizedMessage, - solana_program_runtime::{ - execution_budget::{SVMTransactionExecutionBudget, SVMTransactionExecutionCost}, - invoke_context::{EnvironmentConfig, InvokeContext}, - loaded_programs::{ProgramCacheEntry, ProgramCacheForTxBatch}, - }, - solana_pubkey::Pubkey, - solana_rent::Rent, - solana_signature::Signature, - solana_svm::{program_loader, transaction_processor::TransactionBatchProcessor}, - solana_svm_callback::TransactionProcessingCallback, - solana_svm_conformance::proto::{AcctState, InstrEffects, InstrFixture}, - 
solana_svm_transaction::instruction::SVMInstruction, - solana_sysvar::last_restart_slot, - solana_sysvar_id::SysvarId, - solana_timings::ExecuteTimings, - solana_transaction_context::{ - ExecutionRecord, IndexOfAccount, TransactionAccount, TransactionContext, - }, - std::{ - collections::{hash_map::Entry, HashMap}, - env, - ffi::OsString, - fs::{self, File}, - io::Read, - path::PathBuf, - process::Command, - sync::{Arc, RwLock}, - }, -}; - -mod mock_bank; -mod transaction_builder; - -const fn feature_u64(feature: &Pubkey) -> u64 { - let feature_id = feature.to_bytes(); - feature_id[0] as u64 - | ((feature_id[1] as u64) << 8) - | ((feature_id[2] as u64) << 16) - | ((feature_id[3] as u64) << 24) - | ((feature_id[4] as u64) << 32) - | ((feature_id[5] as u64) << 40) - | ((feature_id[6] as u64) << 48) - | ((feature_id[7] as u64) << 56) -} - -static INDEXED_FEATURES: std::sync::LazyLock> = - std::sync::LazyLock::new(|| { - FEATURE_NAMES - .keys() - .map(|pubkey| (feature_u64(pubkey), *pubkey)) - .collect() - }); - -fn setup() -> PathBuf { - let mut dir = env::current_dir().unwrap(); - dir.push("test-vectors"); - if !dir.exists() { - std::println!("Cloning test-vectors ..."); - Command::new("git") - .args([ - "clone", - "https://github.com/firedancer-io/test-vectors", - dir.as_os_str().to_str().unwrap(), - ]) - .status() - .expect("Failed to download test-vectors"); - - std::println!("Checking out commit 4abb2046cf51efe809498f4fd717023684050d2f"); - Command::new("git") - .current_dir(&dir) - .args(["checkout", "4abb2046cf51efe809498f4fd717023684050d2f"]) - .status() - .expect("Failed to checkout to proper test-vector commit"); - - std::println!("Setup done!"); - } - - dir -} - -fn cleanup() { - let mut dir = env::current_dir().unwrap(); - dir.push("test-vectors"); - - if dir.exists() { - fs::remove_dir_all(dir).expect("Failed to delete test-vectors repository"); - } -} - -#[test] -fn execute_fixtures() { - let mut base_dir = setup(); - base_dir.push("instr"); - 
base_dir.push("fixtures"); - - // bpf-loader tests - base_dir.push("bpf-loader"); - run_from_folder(&base_dir); - base_dir.pop(); - - // bpf-loader-v2 tests - base_dir.push("bpf-loader-v2"); - run_from_folder(&base_dir); - base_dir.pop(); - - // bpf-loader-v3 tests - base_dir.push("bpf-loader-v3"); - run_from_folder(&base_dir); - base_dir.pop(); - - // bpf-loader-v3 tests - base_dir.push("bpf-loader-v3-programs"); - run_from_folder(&base_dir); - base_dir.pop(); - - // System program tests - base_dir.push("system"); - run_from_folder(&base_dir); - base_dir.pop(); - - // non-builtin-programs tests - base_dir.push("unknown"); - run_from_folder(&base_dir); - base_dir.pop(); - - cleanup(); -} - -fn run_from_folder(base_dir: &PathBuf) { - for path in std::fs::read_dir(base_dir).unwrap() { - let filename = path.as_ref().unwrap().file_name(); - let mut file = File::open(path.as_ref().unwrap().path()).expect("file not found"); - let mut buffer = Vec::new(); - file.read_to_end(&mut buffer).expect("Failed to read file"); - let fixture = InstrFixture::decode(buffer.as_slice()).unwrap(); - run_fixture(fixture, filename); - } -} - -fn run_fixture(fixture: InstrFixture, filename: OsString) { - let input = fixture.input.unwrap(); - let output = fixture.output.as_ref().unwrap(); - - let mut transaction_builder = SanitizedTransactionBuilder::default(); - let program_id = Pubkey::new_from_array(input.program_id.try_into().unwrap()); - let mut accounts: Vec = Vec::with_capacity(input.instr_accounts.len()); - let mut signatures: HashMap = - HashMap::with_capacity(input.instr_accounts.len()); - - for item in input.instr_accounts { - let pubkey = Pubkey::new_from_array( - input.accounts[item.index as usize] - .address - .clone() - .try_into() - .unwrap(), - ); - accounts.push(AccountMeta { - pubkey, - is_signer: item.is_signer, - is_writable: item.is_writable, - }); - if item.is_signer { - signatures.insert(pubkey, Signature::new_unique()); - } - } - - 
transaction_builder.create_instruction(program_id, accounts, signatures, input.data); - - let mut feature_set = FeatureSet::default(); - if let Some(features) = &input.epoch_context.as_ref().unwrap().features { - for id in &features.features { - if let Some(pubkey) = INDEXED_FEATURES.get(id) { - feature_set.activate(pubkey, 0); - } - } - } - - let mut fee_payer = Pubkey::new_unique(); - let mut mock_bank = MockBankCallback::default(); - { - let mut account_data_map = mock_bank.account_shared_data.write().unwrap(); - for item in input.accounts { - let pubkey = Pubkey::new_from_array(item.address.try_into().unwrap()); - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(item.lamports); - account_data.set_data(item.data); - account_data.set_owner(Pubkey::new_from_array( - item.owner.clone().try_into().unwrap(), - )); - account_data.set_executable(item.executable); - account_data.set_rent_epoch(item.rent_epoch); - - account_data_map.insert(pubkey, account_data); - } - let mut account_data = AccountSharedData::default(); - account_data.set_lamports(800000); - - while account_data_map.contains_key(&fee_payer) { - // The fee payer must not coincide with any of the previous accounts - fee_payer = Pubkey::new_unique(); - } - account_data_map.insert(fee_payer, account_data); - } - - let Ok(transaction) = transaction_builder.build( - Hash::default(), - (fee_payer, Signature::new_unique()), - false, - true, - ) else { - // If we can't build a sanitized transaction, - // the output must be a failed instruction as well - assert_ne!(output.result, 0); - return; - }; - - let transactions = vec![transaction]; - - let compute_budget = SVMTransactionExecutionBudget { - compute_unit_limit: input.cu_avail, - ..SVMTransactionExecutionBudget::default() - }; - - let v1_environment = create_program_runtime_environment_v1( - &feature_set.runtime_features(), - &compute_budget, - false, - false, - ) - .unwrap(); - - 
mock_bank.override_feature_set(feature_set.runtime_features()); - - let fork_graph = Arc::new(RwLock::new(MockForkGraph {})); - let batch_processor = TransactionBatchProcessor::new( - 42, - 2, - Arc::downgrade(&fork_graph), - Some(Arc::new(v1_environment)), - None, - ); - - batch_processor - .writable_sysvar_cache() - .write() - .unwrap() - .fill_missing_entries(|pubkey, callbackback| { - if let Some(account) = mock_bank.get_account_shared_data(pubkey) { - if account.lamports() > 0 { - callbackback(account.data()); - return; - } - } - - if *pubkey == Clock::id() { - let default_clock = Clock { - slot: 10, - ..Default::default() - }; - let clock_data = bincode::serialize(&default_clock).unwrap(); - callbackback(&clock_data); - } else if *pubkey == EpochSchedule::id() { - callbackback(&bincode::serialize(&EpochSchedule::default()).unwrap()); - } else if *pubkey == Rent::id() { - callbackback(&bincode::serialize(&Rent::default()).unwrap()); - } else if *pubkey == last_restart_slot::id() { - let slot_val = 5000_u64; - callbackback(&bincode::serialize(&slot_val).unwrap()); - } - }); - - execute_fixture_as_instr( - &mock_bank, - &batch_processor, - transactions[0].message(), - compute_budget, - output, - filename, - input.cu_avail, - ); -} - -fn execute_fixture_as_instr( - mock_bank: &MockBankCallback, - batch_processor: &TransactionBatchProcessor, - sanitized_message: &SanitizedMessage, - compute_budget: SVMTransactionExecutionBudget, - output: &InstrEffects, - filename: OsString, - cu_avail: u64, -) { - let rent = if let Ok(rent) = batch_processor.sysvar_cache().get_rent() { - (*rent).clone() - } else { - Rent::default() - }; - - let transaction_accounts: Vec = sanitized_message - .account_keys() - .iter() - .map(|key| (*key, mock_bank.get_account_shared_data(key).unwrap())) - .collect(); - - let mut transaction_context = TransactionContext::new( - transaction_accounts, - rent, - compute_budget.max_instruction_stack_depth, - compute_budget.max_instruction_trace_length, 
- ); - transaction_context.set_remove_accounts_executable_flag_checks(false); - - let mut loaded_programs = ProgramCacheForTxBatch::new( - 42, - batch_processor - .program_cache - .read() - .unwrap() - .environments - .clone(), - None, - 2, - ); - - let program_idx = sanitized_message.instructions()[0].program_id_index as usize; - let program_id = *sanitized_message.account_keys().get(program_idx).unwrap(); - - let loaded_program = program_loader::load_program_with_pubkey( - mock_bank, - &batch_processor.get_environments_for_epoch(2).unwrap(), - &program_id, - 42, - &mut ExecuteTimings::default(), - false, - ) - .unwrap(); - - loaded_programs.replenish(program_id, loaded_program); - loaded_programs.replenish( - solana_system_program::id(), - Arc::new(ProgramCacheEntry::new_builtin( - 0u64, - 0usize, - solana_system_program::system_processor::Entrypoint::vm, - )), - ); - - let log_collector = LogCollector::new_ref(); - - let sysvar_cache = &batch_processor.sysvar_cache(); - #[allow(deprecated)] - let (blockhash, lamports_per_signature) = batch_processor - .sysvar_cache() - .get_recent_blockhashes() - .ok() - .and_then(|x| (*x).last().cloned()) - .map(|x| (x.blockhash, x.fee_calculator.lamports_per_signature)) - .unwrap_or_default(); - - let env_config = EnvironmentConfig::new( - blockhash, - lamports_per_signature, - mock_bank, - &mock_bank.feature_set, - sysvar_cache, - ); - - let mut invoke_context = InvokeContext::new( - &mut transaction_context, - &mut loaded_programs, - env_config, - Some(log_collector.clone()), - compute_budget, - SVMTransactionExecutionCost::default(), - ); - - invoke_context - .prepare_next_top_level_instruction( - sanitized_message, - &SVMInstruction::from(&sanitized_message.instructions()[0]), - vec![program_idx as IndexOfAccount], - ) - .expect("Failed to configure instruction"); - let mut compute_units_consumed = 0u64; - let mut timings = ExecuteTimings::default(); - let result = invoke_context.process_instruction(&mut 
compute_units_consumed, &mut timings); - - if output.result == 0 { - assert!( - result.is_ok(), - "Instruction execution was NOT successful, but should have been: {filename:?}" - ); - } else { - assert!( - result.is_err(), - "Instruction execution was successful, but should NOT have been: {filename:?}" - ); - return; - } - - let ExecutionRecord { - accounts, - return_data, - .. - } = transaction_context.into(); - - verify_accounts_and_data( - &accounts, - output, - compute_units_consumed, - cu_avail, - &return_data.data, - filename, - ); -} - -fn verify_accounts_and_data( - accounts: &[TransactionAccount], - output: &InstrEffects, - consumed_units: u64, - cu_avail: u64, - return_data: &Vec, - filename: OsString, -) { - // The input created by firedancer is malformed in that there may be repeated accounts in the - // instruction execution output. This happens because the set system program as the program ID, - // as pass it as an account to be modified in the instruction. - let mut idx_map: HashMap> = HashMap::new(); - for (idx, item) in accounts.iter().enumerate() { - match idx_map.entry(item.0) { - Entry::Occupied(mut this) => { - this.get_mut().push(idx); - } - Entry::Vacant(this) => { - this.insert(vec![idx]); - } - } - } - - for item in &output.modified_accounts { - let pubkey = Pubkey::new_from_array(item.address.clone().try_into().unwrap()); - let indexes = *idx_map - .get(&pubkey) - .as_ref() - .expect("Account not in expected results"); - - let mut error: Option = Some("err".to_string()); - for idx in indexes { - let received_data = &accounts[*idx].1; - let check_result = check_account(received_data, item, &filename); - - if error.is_some() && check_result.is_none() { - // If at least one of the accounts pass the check, we have no error. 
- error = None; - } else if error.is_some() && check_result.is_some() { - error = check_result; - } - } - - if let Some(error) = error { - panic!("{}", error); - } - } - - assert_eq!( - consumed_units, - cu_avail.saturating_sub(output.cu_avail), - "Execution units differs in case: {filename:?}" - ); - - if return_data.is_empty() { - assert!(output.return_data.is_empty()); - } else { - assert_eq!(&output.return_data, return_data); - } -} - -fn check_account( - received: &AccountSharedData, - expected: &AcctState, - filename: &OsString, -) -> Option { - macro_rules! format_args { - ($received:expr, $expected:expr) => { - format!("received: {:?}\nexpected: {:?}", $received, $expected).as_str() - }; - } - - if received.lamports() != expected.lamports { - return Some( - format!("Lamports differ in case: {filename:?}\n") - + format_args!(received.lamports(), expected.lamports), - ); - } - - if received.data() != expected.data.as_slice() { - return Some( - format!("Account data differs in case: {filename:?}\n") - + format_args!(received.data(), expected.data.as_slice()), - ); - } - - let expected_owner = Pubkey::new_from_array(expected.owner.clone().try_into().unwrap()); - if received.owner() != &expected_owner { - return Some( - format!("Account owner differs in case: {filename:?}\n") - + format_args!(received.owner(), expected_owner), - ); - } - - if received.executable() != expected.executable { - return Some( - format!("Executable boolean differs in case: {filename:?}\n") - + format_args!(received.executable(), expected.executable), - ); - } - - // u64::MAX means we are not considering the epoch - if received.rent_epoch() != u64::MAX - && expected.rent_epoch != u64::MAX - && received.rent_epoch() != expected.rent_epoch - { - return Some( - format!("Rent epoch differs in case: {filename:?}\n") - + format_args!(received.rent_epoch(), expected.rent_epoch), - ); - } - - None -} diff --git a/svm/tests/example-programs/clock-sysvar/Cargo.toml 
b/svm/tests/example-programs/clock-sysvar/Cargo.toml index 8d4456ec8fd278..48fc16a5271eef 100644 --- a/svm/tests/example-programs/clock-sysvar/Cargo.toml +++ b/svm/tests/example-programs/clock-sysvar/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "clock-sysvar-program" -version = "3.0.0" +version = "3.1.0" edition = "2021" [dependencies] diff --git a/svm/tests/example-programs/hello-solana/Cargo.toml b/svm/tests/example-programs/hello-solana/Cargo.toml index 3a01690f137c39..0bf852428f07ae 100644 --- a/svm/tests/example-programs/hello-solana/Cargo.toml +++ b/svm/tests/example-programs/hello-solana/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hello-solana-program" -version = "3.0.0" +version = "3.1.0" edition = "2021" [dependencies] diff --git a/svm/tests/example-programs/simple-transfer/Cargo.toml b/svm/tests/example-programs/simple-transfer/Cargo.toml index 73a40bc89561fa..be8a28fb75819b 100644 --- a/svm/tests/example-programs/simple-transfer/Cargo.toml +++ b/svm/tests/example-programs/simple-transfer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "simple-transfer-program" -version = "3.0.0" +version = "3.1.0" edition = "2021" [dependencies] diff --git a/svm/tests/example-programs/transfer-from-account/Cargo.toml b/svm/tests/example-programs/transfer-from-account/Cargo.toml index ec14dca501a30e..901f73b23195f7 100644 --- a/svm/tests/example-programs/transfer-from-account/Cargo.toml +++ b/svm/tests/example-programs/transfer-from-account/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "transfer-from-account" -version = "3.0.0" +version = "3.1.0" edition = "2021" [dependencies] diff --git a/svm/tests/example-programs/write-to-account/Cargo.toml b/svm/tests/example-programs/write-to-account/Cargo.toml index 428e8441916c17..c33524e5330f13 100644 --- a/svm/tests/example-programs/write-to-account/Cargo.toml +++ b/svm/tests/example-programs/write-to-account/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "write-to-account" -version = "3.0.0" +version = "3.1.0" edition = "2021" 
[dependencies] diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index bef18b93e57482..66ff8af18e3561 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -7,10 +7,9 @@ use { program_data_size, register_builtins, MockBankCallback, MockForkGraph, EXECUTION_EPOCH, EXECUTION_SLOT, WALLCLOCK_TIME, }, - agave_feature_set::{self as feature_set, raise_cpi_nesting_limit_to_8, FeatureSet}, solana_account::{AccountSharedData, ReadableAccount, WritableAccount, PROGRAM_OWNERS}, solana_clock::Slot, - solana_compute_budget_instruction::instructions_processor::process_compute_budget_instructions, + solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_compute_budget_interface::ComputeBudgetInstruction, solana_fee_structure::FeeDetails, solana_hash::Hash, @@ -23,9 +22,11 @@ use { solana_native_token::LAMPORTS_PER_SOL, solana_nonce::{self as nonce, state::DurableNonce}, solana_program_entrypoint::MAX_PERMITTED_DATA_INCREASE, - solana_program_runtime::execution_budget::SVMTransactionExecutionAndFeeBudgetLimits, + solana_program_runtime::execution_budget::{ + SVMTransactionExecutionAndFeeBudgetLimits, MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, + }, solana_pubkey::Pubkey, - solana_sdk_ids::{bpf_loader_upgradeable, native_loader}, + solana_sdk_ids::{bpf_loader_upgradeable, compute_budget, native_loader}, solana_signer::Signer, solana_svm::{ account_loader::{CheckedTransactionDetails, TransactionCheckResult}, @@ -41,21 +42,60 @@ use { TransactionProcessingEnvironment, }, }, - solana_svm_transaction::svm_message::SVMMessage, + solana_svm_feature_set::SVMFeatureSet, + solana_svm_transaction::{instruction::SVMInstruction, svm_message::SVMMessage}, + solana_svm_type_overrides::sync::{Arc, RwLock}, solana_system_interface::{instruction as system_instruction, program as system_program}, solana_system_transaction as system_transaction, solana_sysvar::rent::Rent, solana_transaction::{sanitized::SanitizedTransaction, 
Transaction}, solana_transaction_context::TransactionReturnData, solana_transaction_error::TransactionError, - solana_type_overrides::sync::{Arc, RwLock}, - std::collections::HashMap, + std::{collections::HashMap, num::NonZeroU32, sync::atomic::Ordering}, test_case::test_case, }; // This module contains the implementation of TransactionProcessingCallback mod mock_bank; +// Local implementation of compute budget processing for tests. +fn process_test_compute_budget_instructions<'a>( + instructions: impl Iterator)> + Clone, +) -> Result { + let mut loaded_accounts_data_size_limit = None; + + // Scan for compute budget instructions. + // Only key on `SetLoadedAccountsDataSizeLimit`. + for (program_id, instruction) in instructions { + if *program_id == compute_budget::id() + && instruction.data.len() >= 5 + && instruction.data[0] == 4 + { + let size = u32::from_le_bytes([ + instruction.data[1], + instruction.data[2], + instruction.data[3], + instruction.data[4], + ]); + loaded_accounts_data_size_limit = Some(size); + } + } + + let loaded_accounts_bytes = + if let Some(requested_loaded_accounts_data_size_limit) = loaded_accounts_data_size_limit { + NonZeroU32::new(requested_loaded_accounts_data_size_limit) + .ok_or(TransactionError::InvalidLoadedAccountsDataSizeLimit)? 
+ } else { + MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + } + .min(MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES); + + Ok(ComputeBudgetLimits { + loaded_accounts_bytes, + ..Default::default() + }) +} + const DEPLOYMENT_SLOT: u64 = 0; const LAMPORTS_PER_SIGNATURE: u64 = 5000; const LAST_BLOCKHASH: Hash = Hash::new_from_array([7; 32]); // Arbitrary constant hash for advancing nonces @@ -114,10 +154,9 @@ impl SvmTestEnvironment<'_> { ..Default::default() }; - let feature_set = test_entry.feature_set(); let processing_environment = TransactionProcessingEnvironment { blockhash: LAST_BLOCKHASH, - feature_set: feature_set.runtime_features(), + feature_set: test_entry.feature_set, blockhash_lamports_per_signature: LAMPORTS_PER_SIGNATURE, ..TransactionProcessingEnvironment::default() }; @@ -264,7 +303,7 @@ impl SvmTestEnvironment<'_> { let programs_modified_by_tx = &executed_tx.programs_modified_by_tx; if executed_tx.was_successful() && !programs_modified_by_tx.is_empty() { self.batch_processor - .program_cache + .global_program_cache .write() .unwrap() .merge(programs_modified_by_tx); @@ -278,7 +317,7 @@ impl SvmTestEnvironment<'_> { pub fn is_program_blocked(&self, program_id: &Pubkey) -> bool { let (_, program_cache_entry) = self .batch_processor - .program_cache + .global_program_cache .read() .unwrap() .get_flattened_entries_for_tests() @@ -296,10 +335,10 @@ impl SvmTestEnvironment<'_> { } // container for a transaction batch and all data needed to run and verify it against svm -#[derive(Clone, Default, Debug)] +#[derive(Clone)] pub struct SvmTestEntry { - // features are enabled by default; these will be disabled - pub disabled_features: Vec, + // features configuration for this test + pub feature_set: SVMFeatureSet, // until LoaderV4 is live on mainnet, we default to omitting it, but can also test it pub with_loader_v4: bool, @@ -317,6 +356,19 @@ pub struct SvmTestEntry { pub final_accounts: AccountsMap, } +impl Default for SvmTestEntry { + fn default() -> Self { + Self { + 
feature_set: SVMFeatureSet::all_enabled(), + with_loader_v4: false, + initial_programs: Vec::new(), + initial_accounts: HashMap::new(), + transaction_batch: Vec::new(), + final_accounts: HashMap::new(), + } + } +} + impl SvmTestEntry { pub fn with_loader_v4() -> Self { Self { @@ -448,9 +500,8 @@ impl SvmTestEntry { .map(|item| { let message = SanitizedTransaction::from_transaction_for_tests(item.transaction); let check_result = item.check_result.map(|tx_details| { - let compute_budget_limits = process_compute_budget_instructions( + let compute_budget_limits = process_test_compute_budget_instructions( SVMMessage::program_instructions_iter(&message), - &self.feature_set(), ); let signature_count = message .num_transaction_signatures() @@ -465,8 +516,7 @@ impl SvmTestEntry { signature_count.saturating_mul(LAMPORTS_PER_SIGNATURE), v.get_prioritization_fee(), ), - self.feature_set() - .is_active(&raise_cpi_nesting_limit_to_8::id()), + self.feature_set.raise_cpi_nesting_limit_to_8, ) }); CheckedTransactionDetails::new(tx_details.nonce, compute_budget) @@ -485,16 +535,6 @@ impl SvmTestEntry { .map(|item| item.asserts) .collect() } - - // internal helper to map our feature list to a FeatureSet - fn feature_set(&self) -> FeatureSet { - let mut feature_set = FeatureSet::all_enabled(); - for feature_id in &self.disabled_features { - feature_set.deactivate(feature_id); - } - - feature_set - } } // one transaction in a batch plus check results for svm and asserts for tests @@ -2204,8 +2244,8 @@ fn simd83_account_reallocate(formalize_loaded_transaction_data_size: bool) -> Ve common_test_entry.add_initial_program(program_name); if !formalize_loaded_transaction_data_size { common_test_entry - .disabled_features - .push(feature_set::formalize_loaded_transaction_data_size::id()); + .feature_set + .formalize_loaded_transaction_data_size = false; } let fee_payer_keypair = Keypair::new(); @@ -2313,16 +2353,10 @@ fn svm_integration(test_entries: Vec) { } } -#[test_case(true; "remove 
accounts executable flag check")] -#[test_case(false; "don't remove accounts executable flag check")] -fn program_cache_create_account(remove_accounts_executable_flag_checks: bool) { +#[test] +fn program_cache_create_account() { for loader_id in PROGRAM_OWNERS { let mut test_entry = SvmTestEntry::with_loader_v4(); - if !remove_accounts_executable_flag_checks { - test_entry - .disabled_features - .push(feature_set::remove_accounts_executable_flag_checks::id()); - } let fee_payer_keypair = Keypair::new(); let fee_payer = fee_payer_keypair.pubkey(); @@ -2357,15 +2391,10 @@ fn program_cache_create_account(remove_accounts_executable_flag_checks: bool) { Hash::default(), ); - // fails at load-time for executable flag if feature is disabled - // if feature is enabled fails at execution - let expected_status = if remove_accounts_executable_flag_checks { - ExecutionStatus::ExecutedFailed - } else { - ExecutionStatus::ProcessedFailed - }; - - test_entry.push_transaction_with_status(invoke_transaction.clone(), expected_status); + test_entry.push_transaction_with_status( + invoke_transaction.clone(), + ExecutionStatus::ExecutedFailed, + ); test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); let mut env = SvmTestEnvironment::create(test_entry); @@ -2379,7 +2408,8 @@ fn program_cache_create_account(remove_accounts_executable_flag_checks: bool) { ..SvmTestEntry::default() }; - test_entry.push_transaction_with_status(invoke_transaction, expected_status); + test_entry + .push_transaction_with_status(invoke_transaction, ExecutionStatus::ExecutedFailed); test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); // test in different entry same slot @@ -2628,6 +2658,258 @@ fn program_cache_loaderv3_buffer_swap(invoke_changed_program: bool) { assert!(env.is_program_blocked(&target)); } +#[test] +fn program_cache_stats() { + let mut test_entry = SvmTestEntry::default(); + + let program_name = "hello-solana"; + let noop_program = 
program_address(program_name); + + let fee_payer_keypair = Keypair::new(); + let fee_payer = fee_payer_keypair.pubkey(); + + let mut fee_payer_data = AccountSharedData::default(); + fee_payer_data.set_lamports(LAMPORTS_PER_SOL * 100); + test_entry.add_initial_account(fee_payer, &fee_payer_data); + + test_entry + .initial_programs + .push((program_name.to_string(), DEPLOYMENT_SLOT, Some(fee_payer))); + + let missing_program = Pubkey::new_unique(); + + // set up a future upgrade after the first batch + let buffer_address = Pubkey::new_unique(); + { + let mut data = bincode::serialize(&UpgradeableLoaderState::Buffer { + authority_address: Some(fee_payer), + }) + .unwrap(); + let mut program_bytecode = load_program(program_name.to_string()); + data.append(&mut program_bytecode); + + let buffer_account = AccountSharedData::create( + LAMPORTS_PER_SOL, + data, + bpf_loader_upgradeable::id(), + true, + u64::MAX, + ); + + test_entry.add_initial_account(buffer_address, &buffer_account); + } + + let make_transaction = |instructions: &[Instruction]| { + Transaction::new_signed_with_payer( + instructions, + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + ) + }; + + let succesful_noop_instruction = Instruction::new_with_bytes(noop_program, &[], vec![]); + let succesful_transfer_instruction = + system_instruction::transfer(&fee_payer, &Pubkey::new_unique(), LAMPORTS_PER_SOL); + let failing_transfer_instruction = + system_instruction::transfer(&fee_payer, &Pubkey::new_unique(), LAMPORTS_PER_SOL * 1000); + let fee_only_noop_instruction = Instruction::new_with_bytes(missing_program, &[], vec![]); + + let mut noop_tx_usage = 0; + let mut system_tx_usage = 0; + let mut successful_transfers = 0; + + test_entry.push_transaction(make_transaction(&[succesful_noop_instruction.clone()])); + noop_tx_usage += 1; + + test_entry.push_transaction(make_transaction(&[succesful_transfer_instruction.clone()])); + system_tx_usage += 1; + successful_transfers += 1; + + 
test_entry.push_transaction_with_status( + make_transaction(&[failing_transfer_instruction.clone()]), + ExecutionStatus::ExecutedFailed, + ); + system_tx_usage += 1; + + test_entry.push_transaction(make_transaction(&[ + succesful_noop_instruction.clone(), + succesful_noop_instruction.clone(), + succesful_transfer_instruction.clone(), + succesful_transfer_instruction.clone(), + succesful_noop_instruction.clone(), + ])); + noop_tx_usage += 1; + system_tx_usage += 1; + successful_transfers += 2; + + test_entry.push_transaction_with_status( + make_transaction(&[ + failing_transfer_instruction.clone(), + succesful_noop_instruction.clone(), + succesful_transfer_instruction.clone(), + ]), + ExecutionStatus::ExecutedFailed, + ); + noop_tx_usage += 1; + system_tx_usage += 1; + + // load failure/fee-only does not touch the program cache + test_entry.push_transaction_with_status( + make_transaction(&[ + succesful_noop_instruction.clone(), + fee_only_noop_instruction.clone(), + ]), + ExecutionStatus::ProcessedFailed, + ); + + test_entry.decrease_expected_lamports( + &fee_payer, + LAMPORTS_PER_SIGNATURE * test_entry.transaction_batch.len() as u64 + + LAMPORTS_PER_SOL * successful_transfers, + ); + + // nor does discard + test_entry.transaction_batch.push(TransactionBatchItem { + transaction: make_transaction(&[succesful_transfer_instruction.clone()]), + check_result: Err(TransactionError::BlockhashNotFound), + asserts: ExecutionStatus::Discarded.into(), + }); + + let mut env = SvmTestEnvironment::create(test_entry); + env.execute(); + + // check all usage stats are as we expect + let global_program_cache = env + .batch_processor + .global_program_cache + .read() + .unwrap() + .get_flattened_entries_for_tests() + .into_iter() + .rev() + .collect::>(); + + let (_, noop_entry) = global_program_cache + .iter() + .find(|(pubkey, _)| *pubkey == noop_program) + .unwrap(); + + assert_eq!( + noop_entry.tx_usage_counter.load(Ordering::Relaxed), + noop_tx_usage, + "noop_tx_usage matches" 
+ ); + + let (_, system_entry) = global_program_cache + .iter() + .find(|(pubkey, _)| *pubkey == system_program::id()) + .unwrap(); + + assert_eq!( + system_entry.tx_usage_counter.load(Ordering::Relaxed), + system_tx_usage, + "system_tx_usage matches" + ); + + assert!( + !global_program_cache + .iter() + .any(|(pubkey, _)| *pubkey == missing_program), + "missing_program is missing" + ); + + // set up the second batch + let mut test_entry = SvmTestEntry { + initial_accounts: env.test_entry.final_accounts.clone(), + final_accounts: env.test_entry.final_accounts.clone(), + ..SvmTestEntry::default() + }; + + // upgrade the program. this blocks execution but does not create a tombstone + // the main thing we are testing is the tx counter is ported across upgrades + // + // note the upgrade transaction actually counts as a usage, per the existing rules + // the program cache must load the program because it has no idea if it will be used for cpi + test_entry.push_transaction(Transaction::new_signed_with_payer( + &[loaderv3_instruction::upgrade( + &noop_program, + &buffer_address, + &fee_payer, + &Pubkey::new_unique(), + )], + Some(&fee_payer), + &[&fee_payer_keypair], + Hash::default(), + )); + noop_tx_usage += 1; + + test_entry.drop_expected_account(buffer_address); + + test_entry.push_transaction_with_status( + make_transaction(&[succesful_noop_instruction.clone()]), + ExecutionStatus::ExecutedFailed, + ); + noop_tx_usage += 1; + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE * 2); + + env.test_entry = test_entry; + env.execute(); + + let (_, noop_entry) = env + .batch_processor + .global_program_cache + .read() + .unwrap() + .get_flattened_entries_for_tests() + .into_iter() + .rev() + .find(|(pubkey, _)| *pubkey == noop_program) + .unwrap(); + + assert_eq!( + noop_entry.tx_usage_counter.load(Ordering::Relaxed), + noop_tx_usage, + "noop_tx_usage matches" + ); + + // third batch, this creates a delayed visibility tombstone + let mut 
test_entry = SvmTestEntry { + initial_accounts: env.test_entry.final_accounts.clone(), + final_accounts: env.test_entry.final_accounts.clone(), + ..SvmTestEntry::default() + }; + + test_entry.push_transaction_with_status( + make_transaction(&[succesful_noop_instruction.clone()]), + ExecutionStatus::ExecutedFailed, + ); + noop_tx_usage += 1; + + test_entry.decrease_expected_lamports(&fee_payer, LAMPORTS_PER_SIGNATURE); + + env.test_entry = test_entry; + env.execute(); + + let (_, noop_entry) = env + .batch_processor + .global_program_cache + .read() + .unwrap() + .get_flattened_entries_for_tests() + .into_iter() + .rev() + .find(|(pubkey, _)| *pubkey == noop_program) + .unwrap(); + + assert_eq!( + noop_entry.tx_usage_counter.load(Ordering::Relaxed), + noop_tx_usage, + "noop_tx_usage matches" + ); +} + #[derive(Clone, PartialEq, Eq)] enum Inspect<'a> { LiveRead(&'a AccountSharedData), @@ -2668,8 +2950,8 @@ fn svm_inspect_nonce_load_failure( if !formalize_loaded_transaction_data_size { test_entry - .disabled_features - .push(feature_set::formalize_loaded_transaction_data_size::id()); + .feature_set + .formalize_loaded_transaction_data_size = false; } let fee_payer_keypair = Keypair::new(); @@ -3003,7 +3285,6 @@ mod balance_collector { super::*, rand0_7::prelude::*, solana_program_pack::Pack, - solana_sdk_ids::bpf_loader, spl_generic_token::token_2022, spl_token_interface::state::{ Account as TokenAccount, AccountState as TokenAccountState, Mint, @@ -3011,10 +3292,6 @@ mod balance_collector { test_case::test_case, }; - // this could be part of mock_bank but so far nothing but this uses it - static SPL_TOKEN_BYTES: &[u8] = - include_bytes!("../../program-test/src/programs/spl_token-3.5.0.so"); - const STARTING_BALANCE: u64 = LAMPORTS_PER_SOL * 100; // a helper for constructing a transfer instruction, agnostic over system/token @@ -3135,13 +3412,10 @@ mod balance_collector { u64::MAX, ); - let spl_token = AccountSharedData::create( - LAMPORTS_PER_SOL, - 
SPL_TOKEN_BYTES.to_vec(), - bpf_loader::id(), - true, - u64::MAX, - ); + let (_, spl_token) = + solana_program_binaries::by_id(&spl_token_interface::id(), &Rent::default()) + .unwrap() + .swap_remove(0); for _ in 0..100 { let mut test_entry = SvmTestEntry::default(); diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 03189f340b93cf..324c0cb0ff48ef 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -29,8 +29,8 @@ use { solana_svm_callback::{AccountState, InvokeContextCallback, TransactionProcessingCallback}, solana_svm_feature_set::SVMFeatureSet, solana_svm_transaction::svm_message::SVMMessage, + solana_svm_type_overrides::sync::{Arc, RwLock}, solana_sysvar_id::SysvarId, - solana_type_overrides::sync::{Arc, RwLock}, std::{ cmp::Ordering, collections::HashMap, @@ -68,39 +68,12 @@ pub struct MockBankCallback { impl InvokeContextCallback for MockBankCallback {} impl TransactionProcessingCallback for MockBankCallback { - fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { - if let Some(data) = self.account_shared_data.read().unwrap().get(account) { - if data.lamports() == 0 { - None - } else { - owners.iter().position(|entry| data.owner() == entry) - } - } else { - None - } - } - - fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> { self.account_shared_data .read() .unwrap() .get(pubkey) - .cloned() - } - - fn add_builtin_account(&self, name: &str, program_id: &Pubkey) { - let account_data = AccountSharedData::from(Account { - lamports: 5000, - data: name.as_bytes().to_vec(), - owner: solana_sdk_ids::native_loader::id(), - executable: true, - rent_epoch: 0, - }); - - self.account_shared_data - .write() - .unwrap() - .insert(*program_id, account_data); + .map(|account| (account.clone(), 0)) } fn inspect_account(&self, address: &Pubkey, account_state: AccountState, is_writable: bool) { @@ -131,6 +104,29 @@ 
impl MockBankCallback { ) } + pub fn add_builtin( + &self, + batch_processor: &TransactionBatchProcessor, + program_id: Pubkey, + name: &str, + builtin: ProgramCacheEntry, + ) { + let account_data = AccountSharedData::from(Account { + lamports: 5000, + data: name.as_bytes().to_vec(), + owner: solana_sdk_ids::native_loader::id(), + executable: true, + rent_epoch: 0, + }); + + self.account_shared_data + .write() + .unwrap() + .insert(program_id, account_data); + + batch_processor.add_builtin(program_id, builtin); + } + #[allow(unused)] pub fn override_feature_set(&mut self, new_set: SVMFeatureSet) { self.feature_set = new_set @@ -280,8 +276,8 @@ pub fn register_builtins( const DEPLOYMENT_SLOT: u64 = 0; // We must register LoaderV3 as a loadable account, otherwise programs won't execute. let loader_v3_name = "solana_bpf_loader_upgradeable_program"; - batch_processor.add_builtin( - mock_bank, + mock_bank.add_builtin( + batch_processor, solana_sdk_ids::bpf_loader_upgradeable::id(), loader_v3_name, ProgramCacheEntry::new_builtin( @@ -293,8 +289,8 @@ pub fn register_builtins( // Other loaders are needed for testing program cache behavior. 
let loader_v1_name = "solana_bpf_loader_deprecated_program"; - batch_processor.add_builtin( - mock_bank, + mock_bank.add_builtin( + batch_processor, bpf_loader_deprecated::id(), loader_v1_name, ProgramCacheEntry::new_builtin( @@ -305,8 +301,8 @@ pub fn register_builtins( ); let loader_v2_name = "solana_bpf_loader_program"; - batch_processor.add_builtin( - mock_bank, + mock_bank.add_builtin( + batch_processor, bpf_loader::id(), loader_v2_name, ProgramCacheEntry::new_builtin( @@ -318,8 +314,8 @@ pub fn register_builtins( if with_loader_v4 { let loader_v4_name = "solana_loader_v4_program"; - batch_processor.add_builtin( - mock_bank, + mock_bank.add_builtin( + batch_processor, loader_v4::id(), loader_v4_name, ProgramCacheEntry::new_builtin( @@ -333,8 +329,8 @@ pub fn register_builtins( // In order to perform a transference of native tokens using the system instruction, // the system program builtin must be registered. let system_program_name = "system_program"; - batch_processor.add_builtin( - mock_bank, + mock_bank.add_builtin( + batch_processor, solana_system_program::id(), system_program_name, ProgramCacheEntry::new_builtin( @@ -346,8 +342,8 @@ pub fn register_builtins( // For testing realloc, we need the compute budget program let compute_budget_program_name = "compute_budget_program"; - batch_processor.add_builtin( - mock_bank, + mock_bank.add_builtin( + batch_processor, compute_budget::id(), compute_budget_program_name, ProgramCacheEntry::new_builtin( diff --git a/svm/tests/transaction_builder.rs b/svm/tests/transaction_builder.rs deleted file mode 100644 index 8df4e9b93d195b..00000000000000 --- a/svm/tests/transaction_builder.rs +++ /dev/null @@ -1,250 +0,0 @@ -use { - agave_reserved_account_keys::ReservedAccountKeys, - solana_hash::Hash, - solana_instruction::AccountMeta, - solana_message::{ - compiled_instruction::CompiledInstruction, - v0::{self, LoadedAddresses, MessageAddressTableLookup}, - AddressLoader, AddressLoaderError, Message, MessageHeader, 
VersionedMessage, - }, - solana_pubkey::Pubkey, - solana_signature::Signature, - solana_transaction::{ - sanitized::SanitizedTransaction, - versioned::{sanitized::SanitizedVersionedTransaction, VersionedTransaction}, - }, - solana_transaction_error::TransactionError, - std::collections::{HashMap, HashSet}, -}; - -#[derive(Default)] -pub struct SanitizedTransactionBuilder { - instructions: Vec, - num_required_signatures: u8, - num_readonly_signed_accounts: u8, - num_readonly_unsigned_accounts: u8, - signed_readonly_accounts: Vec<(Pubkey, Signature)>, - signed_mutable_accounts: Vec<(Pubkey, Signature)>, - unsigned_readonly_accounts: Vec, - unsigned_mutable_accounts: Vec, -} - -#[derive(PartialEq, Eq, Hash, Clone)] -enum AccountType { - Readonly, - Writable, - SignerReadonly, - SignerWritable, -} - -struct InnerInstruction { - program_id: Pubkey, - accounts: Vec<(Pubkey, AccountType)>, - data: Vec, -} - -#[derive(Clone)] -struct MockLoader {} - -// This implementation is only necessary if one is using account table lookups. 
-impl AddressLoader for MockLoader { - fn load_addresses( - self, - _lookups: &[MessageAddressTableLookup], - ) -> Result { - Ok(LoadedAddresses { - writable: vec![], - readonly: vec![], - }) - } -} - -impl SanitizedTransactionBuilder { - pub fn create_instruction( - &mut self, - program_id: Pubkey, - // The fee payer and the program id shall not appear in the accounts vector - accounts: Vec, - signatures: HashMap, - data: Vec, - ) { - let mut instruction = InnerInstruction { - program_id, - accounts: Vec::new(), - data, - }; - - for item in &accounts { - let acc_type = match (item.is_signer, item.is_writable) { - (true, true) => { - self.num_required_signatures = self.num_required_signatures.saturating_add(1); - self.signed_mutable_accounts - .push((item.pubkey, signatures[&item.pubkey])); - AccountType::SignerWritable - } - (true, false) => { - self.num_required_signatures = self.num_required_signatures.saturating_add(1); - self.num_readonly_signed_accounts = - self.num_readonly_signed_accounts.saturating_add(1); - self.signed_readonly_accounts - .push((item.pubkey, signatures[&item.pubkey])); - AccountType::SignerReadonly - } - (false, true) => { - self.unsigned_mutable_accounts.push(item.pubkey); - AccountType::Writable - } - (false, false) => { - self.num_readonly_unsigned_accounts = - self.num_readonly_unsigned_accounts.saturating_add(1); - self.unsigned_readonly_accounts.push(item.pubkey); - AccountType::Readonly - } - }; - instruction.accounts.push((item.pubkey, acc_type)); - } - - self.instructions.push(instruction); - } - - pub fn build( - &mut self, - block_hash: Hash, - fee_payer: (Pubkey, Signature), - v0_message: bool, - ignore_reserved_accounts: bool, - ) -> Result { - let mut account_keys = Vec::with_capacity( - self.signed_mutable_accounts - .len() - .saturating_add(self.signed_readonly_accounts.len()) - .saturating_add(self.unsigned_mutable_accounts.len()) - .saturating_add(self.unsigned_readonly_accounts.len()) - .saturating_add(1), - ); - let 
header = MessageHeader { - // The fee payer always requires a signature so +1 - num_required_signatures: self.num_required_signatures.saturating_add(1), - num_readonly_signed_accounts: self.num_readonly_signed_accounts, - // The program id is always a readonly unsigned account - num_readonly_unsigned_accounts: self.num_readonly_unsigned_accounts.saturating_add(1), - }; - - let mut compiled_instructions = Vec::new(); - - let mut signatures = Vec::with_capacity( - self.signed_mutable_accounts - .len() - .saturating_add(self.signed_readonly_accounts.len()) - .saturating_add(1), - ); - let mut positions: HashMap<(Pubkey, AccountType), usize> = HashMap::new(); - - account_keys.push(fee_payer.0); - signatures.push(fee_payer.1); - - let mut positions_lambda = |key: &Pubkey, ty: AccountType| { - positions.insert((*key, ty), account_keys.len()); - account_keys.push(*key); - }; - - self.signed_mutable_accounts - .iter() - .for_each(|(key, signature)| { - positions_lambda(key, AccountType::SignerWritable); - signatures.push(*signature); - }); - self.signed_readonly_accounts - .iter() - .for_each(|(key, signature)| { - positions_lambda(key, AccountType::SignerReadonly); - signatures.push(*signature); - }); - self.unsigned_mutable_accounts - .iter() - .for_each(|key| positions_lambda(key, AccountType::Writable)); - self.unsigned_readonly_accounts - .iter() - .for_each(|key| positions_lambda(key, AccountType::Readonly)); - - let instructions = self.clean_up(); - - for item in instructions { - let accounts = item - .accounts - .iter() - .map(|key| positions[key] as u8) - .collect::>(); - let instruction = CompiledInstruction { - program_id_index: push_and_return_index(item.program_id, &mut account_keys), - accounts, - data: item.data, - }; - - compiled_instructions.push(instruction); - } - - let message = if v0_message { - let message = v0::Message { - header, - account_keys, - recent_blockhash: block_hash, - instructions: compiled_instructions, - address_table_lookups: vec![], - 
}; - - VersionedMessage::V0(message) - } else { - let message = Message { - header, - account_keys, - recent_blockhash: block_hash, - instructions: compiled_instructions, - }; - - VersionedMessage::Legacy(message) - }; - - let transaction = VersionedTransaction { - signatures, - message, - }; - - let sanitized_versioned_transaction = - SanitizedVersionedTransaction::try_new(transaction).unwrap(); - - let loader = MockLoader {}; - - let reserved_active = &ReservedAccountKeys::new_all_activated().active; - let all_inactive = HashSet::new(); - SanitizedTransaction::try_new( - sanitized_versioned_transaction, - Hash::new_unique(), - false, - loader, - if ignore_reserved_accounts { - &all_inactive - } else { - reserved_active - }, - ) - } - - fn clean_up(&mut self) -> Vec { - let instructions = std::mem::take(&mut self.instructions); - self.num_required_signatures = 0; - self.num_readonly_signed_accounts = 0; - self.num_readonly_unsigned_accounts = 0; - self.signed_mutable_accounts.clear(); - self.signed_readonly_accounts.clear(); - self.unsigned_mutable_accounts.clear(); - self.unsigned_readonly_accounts.clear(); - - instructions - } -} - -fn push_and_return_index(value: Pubkey, vector: &mut Vec) -> u8 { - vector.push(value); - vector.len().saturating_sub(1) as u8 -} diff --git a/syscalls/Cargo.toml b/syscalls/Cargo.toml index d870434d930b1a..55758f2e632555 100644 --- a/syscalls/Cargo.toml +++ b/syscalls/Cargo.toml @@ -16,9 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] default = ["metrics"] metrics = ["solana-program-runtime/metrics"] shuttle-test = [ - "solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test", "solana-sbpf/shuttle-test", + "solana-svm-type-overrides/shuttle-test", ] svm-internal = [] @@ -29,33 +29,34 @@ num-traits = { workspace = true } solana-account = { workspace = true } solana-account-info = { workspace = true } solana-big-mod-exp = { workspace = true } -solana-blake3-hasher = { workspace = true } +solana-blake3-hasher = { 
workspace = true, features = ["blake3"] } solana-bn254 = { workspace = true } solana-clock = { workspace = true } solana-cpi = { workspace = true } solana-curve25519 = { workspace = true } solana-hash = { workspace = true } solana-instruction = { workspace = true } -solana-keccak-hasher = { workspace = true } +solana-keccak-hasher = { workspace = true, features = ["sha3"] } solana-loader-v3-interface = { workspace = true, features = ["serde"] } -solana-log-collector = { workspace = true } -solana-measure = { workspace = true } solana-poseidon = { workspace = true } solana-program-entrypoint = { workspace = true } solana-program-runtime = { workspace = true } solana-pubkey = { workspace = true } -solana-sbpf = { workspace = true } +solana-sbpf = { workspace = true, features = ["jit"] } solana-sdk-ids = { workspace = true } solana-secp256k1-recover = { workspace = true } solana-sha256-hasher = { workspace = true } solana-stable-layout = { workspace = true } +solana-stake-interface = { workspace = true } solana-svm-callback = { workspace = true } solana-svm-feature-set = { workspace = true } +solana-svm-log-collector = { workspace = true } +solana-svm-measure = { workspace = true } +solana-svm-timings = { workspace = true } +solana-svm-type-overrides = { workspace = true } solana-sysvar = { workspace = true } solana-sysvar-id = { workspace = true } -solana-timings = { workspace = true } solana-transaction-context = { workspace = true, features = ["bincode"] } -solana-type-overrides = { workspace = true } thiserror = { workspace = true } [dev-dependencies] @@ -70,6 +71,7 @@ solana-pubkey = { workspace = true, features = ["rand"] } solana-rent = { workspace = true } solana-slot-hashes = { workspace = true } solana-transaction-context = { workspace = true, features = ["dev-context-only-utils"] } +static_assertions = { workspace = true } test-case = { workspace = true } [lints] diff --git a/syscalls/src/cpi.rs b/syscalls/src/cpi.rs index eb7836940483af..b31ae7f0400779 
100644 --- a/syscalls/src/cpi.rs +++ b/syscalls/src/cpi.rs @@ -1,363 +1,13 @@ use { super::*, - crate::{translate_inner, translate_slice_inner, translate_type_inner}, solana_instruction::Instruction, - solana_loader_v3_interface::instruction as bpf_loader_upgradeable, - solana_measure::measure::Measure, - solana_program_runtime::{ - invoke_context::SerializedAccountMetadata, - serialization::{create_memory_region_of_account, modify_memory_region_of_account}, + solana_program_runtime::cpi::{ + cpi_common, translate_accounts_c, translate_accounts_rust, translate_instruction_c, + translate_instruction_rust, translate_signers_c, translate_signers_rust, + SyscallInvokeSigned, TranslatedAccount, }, - solana_sbpf::ebpf, - solana_stable_layout::stable_instruction::StableInstruction, - solana_transaction_context::BorrowedAccount, - std::mem, }; -const MAX_CPI_INSTRUCTION_DATA_LEN: u64 = 10 * 1024; -const MAX_CPI_INSTRUCTION_ACCOUNTS: u8 = u8::MAX; -const MAX_CPI_ACCOUNT_INFOS: usize = 128; - -fn check_account_info_pointer( - invoke_context: &InvokeContext, - vm_addr: u64, - expected_vm_addr: u64, - field: &str, -) -> Result<(), Error> { - if vm_addr != expected_vm_addr { - ic_msg!( - invoke_context, - "Invalid account info pointer `{}': {:#x} != {:#x}", - field, - vm_addr, - expected_vm_addr - ); - return Err(SyscallError::InvalidPointer.into()); - } - Ok(()) -} - -// This version is missing lifetime 'a of the return type in the parameter &MemoryMapping. -fn translate_type_mut<'a, T>( - memory_mapping: &MemoryMapping, - vm_addr: u64, - check_aligned: bool, -) -> Result<&'a mut T, Error> { - translate_type_inner!(memory_mapping, AccessType::Store, vm_addr, T, check_aligned) -} -// This version is missing the lifetime 'a of the return type in the parameter &MemoryMapping. 
-fn translate_slice_mut<'a, T>( - memory_mapping: &MemoryMapping, - vm_addr: u64, - len: u64, - check_aligned: bool, -) -> Result<&'a mut [T], Error> { - translate_slice_inner!( - memory_mapping, - AccessType::Store, - vm_addr, - len, - T, - check_aligned, - ) -} - -/// Host side representation of AccountInfo or SolAccountInfo passed to the CPI syscall. -/// -/// At the start of a CPI, this can be different from the data stored in the -/// corresponding BorrowedAccount, and needs to be synched. -struct CallerAccount<'a> { - lamports: &'a mut u64, - owner: &'a mut Pubkey, - // The original data length of the account at the start of the current - // instruction. We use this to determine wether an account was shrunk or - // grown before or after CPI, and to derive the vm address of the realloc - // region. - original_data_len: usize, - // This points to the data section for this account, as serialized and - // mapped inside the vm (see serialize_parameters() in - // BpfExecutor::execute). - // - // This is only set when account_data_direct_mapping is off. - serialized_data: &'a mut [u8], - // Given the corresponding input AccountInfo::data, vm_data_addr points to - // the pointer field and ref_to_len_in_vm points to the length field. - vm_data_addr: u64, - ref_to_len_in_vm: &'a mut u64, -} - -impl<'a> CallerAccount<'a> { - fn get_serialized_data( - memory_mapping: &MemoryMapping<'_>, - vm_addr: u64, - len: u64, - stricter_abi_and_runtime_constraints: bool, - account_data_direct_mapping: bool, - ) -> Result<&'a mut [u8], Error> { - if stricter_abi_and_runtime_constraints && account_data_direct_mapping { - Ok(&mut []) - } else if stricter_abi_and_runtime_constraints { - // Workaround the memory permissions (as these are from the PoV of being inside the VM) - let serialization_ptr = translate_slice_mut::( - memory_mapping, - solana_sbpf::ebpf::MM_INPUT_START, - 1, - false, // Don't care since it is byte aligned - )? 
- .as_mut_ptr(); - unsafe { - Ok(std::slice::from_raw_parts_mut( - serialization_ptr - .add(vm_addr.saturating_sub(solana_sbpf::ebpf::MM_INPUT_START) as usize), - len as usize, - )) - } - } else { - translate_slice_mut::( - memory_mapping, - vm_addr, - len, - false, // Don't care since it is byte aligned - ) - } - } - - // Create a CallerAccount given an AccountInfo. - fn from_account_info( - invoke_context: &InvokeContext, - memory_mapping: &MemoryMapping<'_>, - check_aligned: bool, - _vm_addr: u64, - account_info: &AccountInfo, - account_metadata: &SerializedAccountMetadata, - ) -> Result, Error> { - let stricter_abi_and_runtime_constraints = invoke_context - .get_feature_set() - .stricter_abi_and_runtime_constraints; - - if stricter_abi_and_runtime_constraints { - check_account_info_pointer( - invoke_context, - account_info.key as *const _ as u64, - account_metadata.vm_key_addr, - "key", - )?; - check_account_info_pointer( - invoke_context, - account_info.owner as *const _ as u64, - account_metadata.vm_owner_addr, - "owner", - )?; - } - - // account_info points to host memory. The addresses used internally are - // in vm space so they need to be translated. - let lamports = { - // Double translate lamports out of RefCell - let ptr = translate_type::( - memory_mapping, - account_info.lamports.as_ptr() as u64, - check_aligned, - )?; - if stricter_abi_and_runtime_constraints { - if account_info.lamports.as_ptr() as u64 >= ebpf::MM_INPUT_START { - return Err(SyscallError::InvalidPointer.into()); - } - - check_account_info_pointer( - invoke_context, - *ptr, - account_metadata.vm_lamports_addr, - "lamports", - )?; - } - translate_type_mut::(memory_mapping, *ptr, check_aligned)? 
- }; - - let owner = translate_type_mut::( - memory_mapping, - account_info.owner as *const _ as u64, - check_aligned, - )?; - - let (serialized_data, vm_data_addr, ref_to_len_in_vm) = { - if stricter_abi_and_runtime_constraints - && account_info.data.as_ptr() as u64 >= ebpf::MM_INPUT_START - { - return Err(SyscallError::InvalidPointer.into()); - } - - // Double translate data out of RefCell - let data = *translate_type::<&[u8]>( - memory_mapping, - account_info.data.as_ptr() as *const _ as u64, - check_aligned, - )?; - if stricter_abi_and_runtime_constraints { - check_account_info_pointer( - invoke_context, - data.as_ptr() as u64, - account_metadata.vm_data_addr, - "data", - )?; - } - - consume_compute_meter( - invoke_context, - (data.len() as u64) - .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) - .unwrap_or(u64::MAX), - )?; - - let vm_len_addr = (account_info.data.as_ptr() as *const u64 as u64) - .saturating_add(size_of::() as u64); - if stricter_abi_and_runtime_constraints { - // In the same vein as the other check_account_info_pointer() checks, we don't lock - // this pointer to a specific address but we don't want it to be inside accounts, or - // callees might be able to write to the pointed memory. - if vm_len_addr >= ebpf::MM_INPUT_START { - return Err(SyscallError::InvalidPointer.into()); - } - } - let ref_to_len_in_vm = translate_type_mut::(memory_mapping, vm_len_addr, false)?; - let vm_data_addr = data.as_ptr() as u64; - let serialized_data = CallerAccount::get_serialized_data( - memory_mapping, - vm_data_addr, - data.len() as u64, - stricter_abi_and_runtime_constraints, - invoke_context.account_data_direct_mapping, - )?; - (serialized_data, vm_data_addr, ref_to_len_in_vm) - }; - - Ok(CallerAccount { - lamports, - owner, - original_data_len: account_metadata.original_data_len, - serialized_data, - vm_data_addr, - ref_to_len_in_vm, - }) - } - - // Create a CallerAccount given a SolAccountInfo. 
- fn from_sol_account_info( - invoke_context: &InvokeContext, - memory_mapping: &MemoryMapping<'_>, - check_aligned: bool, - vm_addr: u64, - account_info: &SolAccountInfo, - account_metadata: &SerializedAccountMetadata, - ) -> Result, Error> { - let stricter_abi_and_runtime_constraints = invoke_context - .get_feature_set() - .stricter_abi_and_runtime_constraints; - - if stricter_abi_and_runtime_constraints { - check_account_info_pointer( - invoke_context, - account_info.key_addr, - account_metadata.vm_key_addr, - "key", - )?; - - check_account_info_pointer( - invoke_context, - account_info.owner_addr, - account_metadata.vm_owner_addr, - "owner", - )?; - - check_account_info_pointer( - invoke_context, - account_info.lamports_addr, - account_metadata.vm_lamports_addr, - "lamports", - )?; - - check_account_info_pointer( - invoke_context, - account_info.data_addr, - account_metadata.vm_data_addr, - "data", - )?; - } - - // account_info points to host memory. The addresses used internally are - // in vm space so they need to be translated. - let lamports = - translate_type_mut::(memory_mapping, account_info.lamports_addr, check_aligned)?; - let owner = - translate_type_mut::(memory_mapping, account_info.owner_addr, check_aligned)?; - - consume_compute_meter( - invoke_context, - account_info - .data_len - .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) - .unwrap_or(u64::MAX), - )?; - - let serialized_data = CallerAccount::get_serialized_data( - memory_mapping, - account_info.data_addr, - account_info.data_len, - stricter_abi_and_runtime_constraints, - invoke_context.account_data_direct_mapping, - )?; - - // we already have the host addr we want: &mut account_info.data_len. - // The account info might be read only in the vm though, so we translate - // to ensure we can write. This is tested by programs/sbf/rust/ro_modify - // which puts SolAccountInfo in rodata. 
- let vm_len_addr = vm_addr - .saturating_add(&account_info.data_len as *const u64 as u64) - .saturating_sub(account_info as *const _ as *const u64 as u64); - let ref_to_len_in_vm = translate_type_mut::(memory_mapping, vm_len_addr, false)?; - - Ok(CallerAccount { - lamports, - owner, - original_data_len: account_metadata.original_data_len, - serialized_data, - vm_data_addr: account_info.data_addr, - ref_to_len_in_vm, - }) - } -} - -struct TranslatedAccount<'a> { - index_in_caller: IndexOfAccount, - caller_account: CallerAccount<'a>, - update_caller_account_region: bool, - update_caller_account_info: bool, -} - -/// Implemented by language specific data structure translators -trait SyscallInvokeSigned { - fn translate_instruction( - addr: u64, - memory_mapping: &MemoryMapping, - invoke_context: &mut InvokeContext, - check_aligned: bool, - ) -> Result; - fn translate_accounts<'a>( - account_infos_addr: u64, - account_infos_len: u64, - memory_mapping: &MemoryMapping<'_>, - invoke_context: &mut InvokeContext, - check_aligned: bool, - ) -> Result>, Error>; - fn translate_signers( - program_id: &Pubkey, - signers_seeds_addr: u64, - signers_seeds_len: u64, - memory_mapping: &MemoryMapping, - check_aligned: bool, - ) -> Result, Error>; -} - declare_builtin_function!( /// Cross-program invocation called from Rust SyscallInvokeSignedRust, @@ -389,52 +39,7 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { invoke_context: &mut InvokeContext, check_aligned: bool, ) -> Result { - let ix = translate_type::(memory_mapping, addr, check_aligned)?; - let account_metas = translate_slice::( - memory_mapping, - ix.accounts.as_vaddr(), - ix.accounts.len(), - check_aligned, - )?; - let data = translate_slice::( - memory_mapping, - ix.data.as_vaddr(), - ix.data.len(), - check_aligned, - )? 
- .to_vec(); - - check_instruction_size(account_metas.len(), data.len(), invoke_context)?; - - if invoke_context.get_feature_set().loosen_cpi_size_restriction { - consume_compute_meter( - invoke_context, - (data.len() as u64) - .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) - .unwrap_or(u64::MAX), - )?; - } - - let mut accounts = Vec::with_capacity(account_metas.len()); - #[allow(clippy::needless_range_loop)] - for account_index in 0..account_metas.len() { - #[allow(clippy::indexing_slicing)] - let account_meta = &account_metas[account_index]; - if unsafe { - std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 - || std::ptr::read_volatile(&account_meta.is_writable as *const _ as *const u8) - > 1 - } { - return Err(Box::new(InstructionError::InvalidArgument)); - } - accounts.push(account_meta.clone()); - } - - Ok(Instruction { - accounts, - data, - program_id: ix.program_id, - }) + translate_instruction_rust(addr, memory_mapping, invoke_context, check_aligned) } fn translate_accounts<'a>( @@ -444,23 +49,12 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { invoke_context: &mut InvokeContext, check_aligned: bool, ) -> Result>, Error> { - let (account_infos, account_info_keys) = translate_account_infos( + translate_accounts_rust( account_infos_addr, account_infos_len, - |account_info: &AccountInfo| account_info.key as *const _ as u64, memory_mapping, invoke_context, check_aligned, - )?; - - translate_and_update_accounts( - &account_info_keys, - account_infos, - account_infos_addr, - invoke_context, - memory_mapping, - check_aligned, - CallerAccount::from_account_info, ) } @@ -471,95 +65,16 @@ impl SyscallInvokeSigned for SyscallInvokeSignedRust { memory_mapping: &MemoryMapping, check_aligned: bool, ) -> Result, Error> { - let mut signers = Vec::new(); - if signers_seeds_len > 0 { - let signers_seeds = translate_slice::>>( - memory_mapping, - signers_seeds_addr, - signers_seeds_len, - check_aligned, - )?; - if 
signers_seeds.len() > MAX_SIGNERS { - return Err(Box::new(SyscallError::TooManySigners)); - } - for signer_seeds in signers_seeds.iter() { - let untranslated_seeds = translate_slice::>( - memory_mapping, - signer_seeds.ptr(), - signer_seeds.len(), - check_aligned, - )?; - if untranslated_seeds.len() > MAX_SEEDS { - return Err(Box::new(InstructionError::MaxSeedLengthExceeded)); - } - let seeds = untranslated_seeds - .iter() - .map(|untranslated_seed| { - untranslated_seed.translate(memory_mapping, check_aligned) - }) - .collect::, Error>>()?; - let signer = Pubkey::create_program_address(&seeds, program_id) - .map_err(SyscallError::BadSeeds)?; - signers.push(signer); - } - Ok(signers) - } else { - Ok(vec![]) - } + translate_signers_rust( + program_id, + signers_seeds_addr, + signers_seeds_len, + memory_mapping, + check_aligned, + ) } } -/// Rust representation of C's SolInstruction -#[derive(Debug)] -#[repr(C)] -struct SolInstruction { - program_id_addr: u64, - accounts_addr: u64, - accounts_len: u64, - data_addr: u64, - data_len: u64, -} - -/// Rust representation of C's SolAccountMeta -#[derive(Debug)] -#[repr(C)] -struct SolAccountMeta { - pubkey_addr: u64, - is_writable: bool, - is_signer: bool, -} - -/// Rust representation of C's SolAccountInfo -#[derive(Debug)] -#[repr(C)] -struct SolAccountInfo { - key_addr: u64, - lamports_addr: u64, - data_len: u64, - data_addr: u64, - owner_addr: u64, - rent_epoch: u64, - is_signer: bool, - is_writable: bool, - executable: bool, -} - -/// Rust representation of C's SolSignerSeed -#[derive(Debug)] -#[repr(C)] -struct SolSignerSeedC { - addr: u64, - len: u64, -} - -/// Rust representation of C's SolSignerSeeds -#[derive(Debug)] -#[repr(C)] -struct SolSignerSeedsC { - addr: u64, - len: u64, -} - declare_builtin_function!( /// Cross-program invocation called from C SyscallInvokeSignedC, @@ -591,57 +106,7 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { invoke_context: &mut InvokeContext, check_aligned: bool, ) -> Result 
{ - let ix_c = translate_type::(memory_mapping, addr, check_aligned)?; - - let program_id = - translate_type::(memory_mapping, ix_c.program_id_addr, check_aligned)?; - let account_metas = translate_slice::( - memory_mapping, - ix_c.accounts_addr, - ix_c.accounts_len, - check_aligned, - )?; - let data = - translate_slice::(memory_mapping, ix_c.data_addr, ix_c.data_len, check_aligned)? - .to_vec(); - - check_instruction_size(ix_c.accounts_len as usize, data.len(), invoke_context)?; - - if invoke_context.get_feature_set().loosen_cpi_size_restriction { - consume_compute_meter( - invoke_context, - (data.len() as u64) - .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) - .unwrap_or(u64::MAX), - )?; - } - - let mut accounts = Vec::with_capacity(ix_c.accounts_len as usize); - #[allow(clippy::needless_range_loop)] - for account_index in 0..ix_c.accounts_len as usize { - #[allow(clippy::indexing_slicing)] - let account_meta = &account_metas[account_index]; - if unsafe { - std::ptr::read_volatile(&account_meta.is_signer as *const _ as *const u8) > 1 - || std::ptr::read_volatile(&account_meta.is_writable as *const _ as *const u8) - > 1 - } { - return Err(Box::new(InstructionError::InvalidArgument)); - } - let pubkey = - translate_type::(memory_mapping, account_meta.pubkey_addr, check_aligned)?; - accounts.push(AccountMeta { - pubkey: *pubkey, - is_signer: account_meta.is_signer, - is_writable: account_meta.is_writable, - }); - } - - Ok(Instruction { - accounts, - data, - program_id: *program_id, - }) + translate_instruction_c(addr, memory_mapping, invoke_context, check_aligned) } fn translate_accounts<'a>( @@ -651,23 +116,12 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { invoke_context: &mut InvokeContext, check_aligned: bool, ) -> Result>, Error> { - let (account_infos, account_info_keys) = translate_account_infos( + translate_accounts_c( account_infos_addr, account_infos_len, - |account_info: &SolAccountInfo| account_info.key_addr, memory_mapping, 
invoke_context, check_aligned, - )?; - - translate_and_update_accounts( - &account_info_keys, - account_infos, - account_infos_addr, - invoke_context, - memory_mapping, - check_aligned, - CallerAccount::from_sol_account_info, ) } @@ -678,1648 +132,12 @@ impl SyscallInvokeSigned for SyscallInvokeSignedC { memory_mapping: &MemoryMapping, check_aligned: bool, ) -> Result, Error> { - if signers_seeds_len > 0 { - let signers_seeds = translate_slice::( - memory_mapping, - signers_seeds_addr, - signers_seeds_len, - check_aligned, - )?; - if signers_seeds.len() > MAX_SIGNERS { - return Err(Box::new(SyscallError::TooManySigners)); - } - Ok(signers_seeds - .iter() - .map(|signer_seeds| { - let seeds = translate_slice::( - memory_mapping, - signer_seeds.addr, - signer_seeds.len, - check_aligned, - )?; - if seeds.len() > MAX_SEEDS { - return Err(Box::new(InstructionError::MaxSeedLengthExceeded) as Error); - } - let seeds_bytes = seeds - .iter() - .map(|seed| { - translate_slice::( - memory_mapping, - seed.addr, - seed.len, - check_aligned, - ) - }) - .collect::, Error>>()?; - Pubkey::create_program_address(&seeds_bytes, program_id) - .map_err(|err| Box::new(SyscallError::BadSeeds(err)) as Error) - }) - .collect::, Error>>()?) - } else { - Ok(vec![]) - } - } -} - -fn translate_account_infos<'a, T, F>( - account_infos_addr: u64, - account_infos_len: u64, - key_addr: F, - memory_mapping: &'a MemoryMapping, - invoke_context: &mut InvokeContext, - check_aligned: bool, -) -> Result<(&'a [T], Vec<&'a Pubkey>), Error> -where - F: Fn(&T) -> u64, -{ - let stricter_abi_and_runtime_constraints = invoke_context - .get_feature_set() - .stricter_abi_and_runtime_constraints; - - // In the same vein as the other check_account_info_pointer() checks, we don't lock - // this pointer to a specific address but we don't want it to be inside accounts, or - // callees might be able to write to the pointed memory. 
- if stricter_abi_and_runtime_constraints - && account_infos_addr - .saturating_add(account_infos_len.saturating_mul(std::mem::size_of::() as u64)) - >= ebpf::MM_INPUT_START - { - return Err(SyscallError::InvalidPointer.into()); - } - - let account_infos = translate_slice::( - memory_mapping, - account_infos_addr, - account_infos_len, - check_aligned, - )?; - check_account_infos(account_infos.len(), invoke_context)?; - let mut account_info_keys = Vec::with_capacity(account_infos_len as usize); - #[allow(clippy::needless_range_loop)] - for account_index in 0..account_infos_len as usize { - #[allow(clippy::indexing_slicing)] - let account_info = &account_infos[account_index]; - account_info_keys.push(translate_type::( - memory_mapping, - key_addr(account_info), - check_aligned, - )?); - } - Ok((account_infos, account_info_keys)) -} - -// Finish translating accounts, build CallerAccount values and update callee -// accounts in preparation of executing the callee. -fn translate_and_update_accounts<'a, T, F>( - account_info_keys: &[&Pubkey], - account_infos: &[T], - account_infos_addr: u64, - invoke_context: &mut InvokeContext, - memory_mapping: &MemoryMapping<'_>, - check_aligned: bool, - do_translate: F, -) -> Result>, Error> -where - F: Fn( - &InvokeContext, - &MemoryMapping<'_>, - bool, - u64, - &T, - &SerializedAccountMetadata, - ) -> Result, Error>, -{ - let transaction_context = &invoke_context.transaction_context; - let next_instruction_accounts = transaction_context - .get_next_instruction_context()? 
- .instruction_accounts(); - let instruction_context = transaction_context.get_current_instruction_context()?; - let mut accounts = Vec::with_capacity(next_instruction_accounts.len()); - - // unwrapping here is fine: we're in a syscall and the method below fails - // only outside syscalls - let accounts_metadata = &invoke_context - .get_syscall_context() - .unwrap() - .accounts_metadata; - - let stricter_abi_and_runtime_constraints = invoke_context - .get_feature_set() - .stricter_abi_and_runtime_constraints; - - for (instruction_account_index, instruction_account) in - next_instruction_accounts.iter().enumerate() - { - if instruction_account_index as IndexOfAccount != instruction_account.index_in_callee { - continue; // Skip duplicate account - } - - let index_in_caller = instruction_context - .get_index_of_account_in_instruction(instruction_account.index_in_transaction)?; - let callee_account = instruction_context - .try_borrow_instruction_account(transaction_context, index_in_caller)?; - let account_key = invoke_context - .transaction_context - .get_key_of_account_at_index(instruction_account.index_in_transaction)?; - - #[allow(deprecated)] - if callee_account.is_executable() { - // Use the known account - consume_compute_meter( - invoke_context, - (callee_account.get_data().len() as u64) - .checked_div(invoke_context.get_execution_cost().cpi_bytes_per_unit) - .unwrap_or(u64::MAX), - )?; - } else if let Some(caller_account_index) = - account_info_keys.iter().position(|key| *key == account_key) - { - let serialized_metadata = - accounts_metadata - .get(index_in_caller as usize) - .ok_or_else(|| { - ic_msg!( - invoke_context, - "Internal error: index mismatch for account {}", - account_key - ); - Box::new(InstructionError::MissingAccount) - })?; - - // build the CallerAccount corresponding to this account. 
- if caller_account_index >= account_infos.len() { - return Err(Box::new(SyscallError::InvalidLength)); - } - #[allow(clippy::indexing_slicing)] - let caller_account = - do_translate( - invoke_context, - memory_mapping, - check_aligned, - account_infos_addr.saturating_add( - caller_account_index.saturating_mul(mem::size_of::()) as u64, - ), - &account_infos[caller_account_index], - serialized_metadata, - )?; - - // before initiating CPI, the caller may have modified the - // account (caller_account). We need to update the corresponding - // BorrowedAccount (callee_account) so the callee can see the - // changes. - let update_caller = update_callee_account( - check_aligned, - &caller_account, - callee_account, - stricter_abi_and_runtime_constraints, - invoke_context.account_data_direct_mapping, - )?; - - accounts.push(TranslatedAccount { - index_in_caller, - caller_account, - update_caller_account_region: instruction_account.is_writable() || update_caller, - update_caller_account_info: instruction_account.is_writable(), - }); - } else { - ic_msg!( - invoke_context, - "Instruction references an unknown account {}", - account_key - ); - return Err(Box::new(InstructionError::MissingAccount)); - } - } - - Ok(accounts) -} - -fn check_instruction_size( - num_accounts: usize, - data_len: usize, - invoke_context: &mut InvokeContext, -) -> Result<(), Error> { - if invoke_context.get_feature_set().loosen_cpi_size_restriction { - let data_len = data_len as u64; - let max_data_len = MAX_CPI_INSTRUCTION_DATA_LEN; - if data_len > max_data_len { - return Err(Box::new(SyscallError::MaxInstructionDataLenExceeded { - data_len, - max_data_len, - })); - } - - let num_accounts = num_accounts as u64; - let max_accounts = MAX_CPI_INSTRUCTION_ACCOUNTS as u64; - if num_accounts > max_accounts { - return Err(Box::new(SyscallError::MaxInstructionAccountsExceeded { - num_accounts, - max_accounts, - })); - } - } else { - let max_size = 
invoke_context.get_compute_budget().max_cpi_instruction_size; - let size = num_accounts - .saturating_mul(size_of::()) - .saturating_add(data_len); - if size > max_size { - return Err(Box::new(SyscallError::InstructionTooLarge(size, max_size))); - } - } - Ok(()) -} - -fn check_account_infos( - num_account_infos: usize, - invoke_context: &mut InvokeContext, -) -> Result<(), Error> { - if invoke_context.get_feature_set().loosen_cpi_size_restriction { - let max_cpi_account_infos = if invoke_context - .get_feature_set() - .increase_tx_account_lock_limit - { - MAX_CPI_ACCOUNT_INFOS - } else { - 64 - }; - let num_account_infos = num_account_infos as u64; - let max_account_infos = max_cpi_account_infos as u64; - if num_account_infos > max_account_infos { - return Err(Box::new(SyscallError::MaxInstructionAccountInfosExceeded { - num_account_infos, - max_account_infos, - })); - } - } else { - let adjusted_len = num_account_infos.saturating_mul(size_of::()); - - if adjusted_len > invoke_context.get_compute_budget().max_cpi_instruction_size { - // Cap the number of account_infos a caller can pass to approximate - // maximum that accounts that could be passed in an instruction - return Err(Box::new(SyscallError::TooManyAccounts)); - }; - } - Ok(()) -} - -fn check_authorized_program( - program_id: &Pubkey, - instruction_data: &[u8], - invoke_context: &InvokeContext, -) -> Result<(), Error> { - if native_loader::check_id(program_id) - || bpf_loader::check_id(program_id) - || bpf_loader_deprecated::check_id(program_id) - || (solana_sdk_ids::bpf_loader_upgradeable::check_id(program_id) - && !(bpf_loader_upgradeable::is_upgrade_instruction(instruction_data) - || bpf_loader_upgradeable::is_set_authority_instruction(instruction_data) - || (invoke_context - .get_feature_set() - .enable_bpf_loader_set_authority_checked_ix - && bpf_loader_upgradeable::is_set_authority_checked_instruction( - instruction_data, - )) - || (invoke_context - .get_feature_set() - .enable_extend_program_checked 
- && bpf_loader_upgradeable::is_extend_program_checked_instruction( - instruction_data, - )) - || bpf_loader_upgradeable::is_close_instruction(instruction_data))) - || invoke_context.is_precompile(program_id) - { - return Err(Box::new(SyscallError::ProgramNotSupported(*program_id))); - } - Ok(()) -} - -/// Call process instruction, common to both Rust and C -fn cpi_common( - invoke_context: &mut InvokeContext, - instruction_addr: u64, - account_infos_addr: u64, - account_infos_len: u64, - signers_seeds_addr: u64, - signers_seeds_len: u64, - memory_mapping: &mut MemoryMapping, -) -> Result { - let check_aligned = invoke_context.get_check_aligned(); - - // CPI entry. - // - // Translate the inputs to the syscall and synchronize the caller's account - // changes so the callee can see them. - consume_compute_meter( - invoke_context, - invoke_context.get_execution_cost().invoke_units, - )?; - if let Some(execute_time) = invoke_context.execute_time.as_mut() { - execute_time.stop(); - invoke_context.timings.execute_us += execute_time.as_us(); - } - - let instruction = S::translate_instruction( - instruction_addr, - memory_mapping, - invoke_context, - check_aligned, - )?; - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - let caller_program_id = instruction_context.get_last_program_key(transaction_context)?; - let signers = S::translate_signers( - caller_program_id, - signers_seeds_addr, - signers_seeds_len, - memory_mapping, - check_aligned, - )?; - check_authorized_program(&instruction.program_id, &instruction.data, invoke_context)?; - invoke_context.prepare_next_instruction(&instruction, &signers)?; - - let mut accounts = S::translate_accounts( - account_infos_addr, - account_infos_len, - memory_mapping, - invoke_context, - check_aligned, - )?; - - // Process the callee instruction - let mut compute_units_consumed = 0; - invoke_context - .process_instruction(&mut 
compute_units_consumed, &mut ExecuteTimings::default())?; - - // re-bind to please the borrow checker - let transaction_context = &invoke_context.transaction_context; - let instruction_context = transaction_context.get_current_instruction_context()?; - - // CPI exit. - // - // Synchronize the callee's account changes so the caller can see them. - let stricter_abi_and_runtime_constraints = invoke_context - .get_feature_set() - .stricter_abi_and_runtime_constraints; - - for translate_account in accounts.iter_mut() { - let mut callee_account = instruction_context.try_borrow_instruction_account( - transaction_context, - translate_account.index_in_caller, - )?; - if translate_account.update_caller_account_info { - update_caller_account( - invoke_context, - memory_mapping, - check_aligned, - &mut translate_account.caller_account, - &mut callee_account, - stricter_abi_and_runtime_constraints, - )?; - } - } - - if stricter_abi_and_runtime_constraints { - for translate_account in accounts.iter() { - let mut callee_account = instruction_context.try_borrow_instruction_account( - transaction_context, - translate_account.index_in_caller, - )?; - if translate_account.update_caller_account_region { - update_caller_account_region( - memory_mapping, - check_aligned, - &translate_account.caller_account, - &mut callee_account, - invoke_context.account_data_direct_mapping, - )?; - } - } - } - - invoke_context.execute_time = Some(Measure::start("execute")); - Ok(SUCCESS) -} - -// Update the given account before executing CPI. -// -// caller_account and callee_account describe the same account. At CPI entry -// caller_account might include changes the caller has made to the account -// before executing CPI. -// -// This method updates callee_account so the CPI callee can see the caller's -// changes. -// -// When true is returned, the caller account must be updated after CPI. This -// is only set for stricter_abi_and_runtime_constraints when the pointer may have changed. 
-fn update_callee_account( - check_aligned: bool, - caller_account: &CallerAccount, - mut callee_account: BorrowedAccount<'_>, - stricter_abi_and_runtime_constraints: bool, - account_data_direct_mapping: bool, -) -> Result { - let mut must_update_caller = false; - - if callee_account.get_lamports() != *caller_account.lamports { - callee_account.set_lamports(*caller_account.lamports)?; - } - - if stricter_abi_and_runtime_constraints { - let prev_len = callee_account.get_data().len(); - let post_len = *caller_account.ref_to_len_in_vm as usize; - if prev_len != post_len { - let is_caller_loader_deprecated = !check_aligned; - let address_space_reserved_for_account = if is_caller_loader_deprecated { - caller_account.original_data_len - } else { - caller_account - .original_data_len - .saturating_add(MAX_PERMITTED_DATA_INCREASE) - }; - if post_len > address_space_reserved_for_account { - return Err(InstructionError::InvalidRealloc.into()); - } - callee_account.set_data_length(post_len)?; - // pointer to data may have changed, so caller must be updated - must_update_caller = true; - } - if !account_data_direct_mapping && callee_account.can_data_be_changed().is_ok() { - callee_account.set_data_from_slice(caller_account.serialized_data)?; - } - } else { - // The redundant check helps to avoid the expensive data comparison if we can - match callee_account.can_data_be_resized(caller_account.serialized_data.len()) { - Ok(()) => callee_account.set_data_from_slice(caller_account.serialized_data)?, - Err(err) if callee_account.get_data() != caller_account.serialized_data => { - return Err(Box::new(err)); - } - _ => {} - } - } - - // Change the owner at the end so that we are allowed to change the lamports and data before - if callee_account.get_owner() != caller_account.owner { - callee_account.set_owner(caller_account.owner.as_ref())?; - // caller gave ownership and thus write access away, so caller must be updated - must_update_caller = true; - } - - Ok(must_update_caller) -} - 
-fn update_caller_account_region( - memory_mapping: &mut MemoryMapping, - check_aligned: bool, - caller_account: &CallerAccount, - callee_account: &mut BorrowedAccount<'_>, - account_data_direct_mapping: bool, -) -> Result<(), Error> { - let is_caller_loader_deprecated = !check_aligned; - let address_space_reserved_for_account = if is_caller_loader_deprecated { - caller_account.original_data_len - } else { - caller_account - .original_data_len - .saturating_add(MAX_PERMITTED_DATA_INCREASE) - }; - - if address_space_reserved_for_account > 0 { - // We can trust vm_data_addr to point to the correct region because we - // enforce that in CallerAccount::from_(sol_)account_info. - let (region_index, region) = memory_mapping - .find_region(caller_account.vm_data_addr) - .ok_or_else(|| Box::new(InstructionError::MissingAccount))?; - // vm_data_addr must always point to the beginning of the region - debug_assert_eq!(region.vm_addr, caller_account.vm_data_addr); - let mut new_region; - if !account_data_direct_mapping { - new_region = region.clone(); - modify_memory_region_of_account(callee_account, &mut new_region); - } else { - new_region = create_memory_region_of_account(callee_account, region.vm_addr)?; - } - memory_mapping.replace_region(region_index, new_region)?; - } - - Ok(()) -} - -// Update the given account after executing CPI. -// -// caller_account and callee_account describe to the same account. At CPI exit -// callee_account might include changes the callee has made to the account -// after executing. -// -// This method updates caller_account so the CPI caller can see the callee's -// changes. -// -// Safety: Once `stricter_abi_and_runtime_constraints` is enabled all fields of [CallerAccount] used -// in this function should never point inside the address space reserved for -// accounts (regardless of the current size of an account). 
-fn update_caller_account( - invoke_context: &InvokeContext, - memory_mapping: &MemoryMapping<'_>, - check_aligned: bool, - caller_account: &mut CallerAccount<'_>, - callee_account: &mut BorrowedAccount<'_>, - stricter_abi_and_runtime_constraints: bool, -) -> Result<(), Error> { - *caller_account.lamports = callee_account.get_lamports(); - *caller_account.owner = *callee_account.get_owner(); - - let prev_len = *caller_account.ref_to_len_in_vm as usize; - let post_len = callee_account.get_data().len(); - let is_caller_loader_deprecated = !check_aligned; - let address_space_reserved_for_account = - if stricter_abi_and_runtime_constraints && is_caller_loader_deprecated { - caller_account.original_data_len - } else { - caller_account - .original_data_len - .saturating_add(MAX_PERMITTED_DATA_INCREASE) - }; - - if post_len > address_space_reserved_for_account - && (stricter_abi_and_runtime_constraints || prev_len != post_len) - { - let max_increase = - address_space_reserved_for_account.saturating_sub(caller_account.original_data_len); - ic_msg!( - invoke_context, - "Account data size realloc limited to {max_increase} in inner instructions", - ); - return Err(Box::new(InstructionError::InvalidRealloc)); - } - - if prev_len != post_len { - // when stricter_abi_and_runtime_constraints is enabled we don't cache the serialized data in - // caller_account.serialized_data. See CallerAccount::from_account_info. - if !(stricter_abi_and_runtime_constraints && invoke_context.account_data_direct_mapping) { - // If the account has been shrunk, we're going to zero the unused memory - // *that was previously used*. - if post_len < prev_len { - caller_account - .serialized_data - .get_mut(post_len..) - .ok_or_else(|| Box::new(InstructionError::AccountDataTooSmall))? - .fill(0); - } - // Set the length of caller_account.serialized_data to post_len. 
- caller_account.serialized_data = CallerAccount::get_serialized_data( - memory_mapping, - caller_account.vm_data_addr, - post_len as u64, - stricter_abi_and_runtime_constraints, - invoke_context.account_data_direct_mapping, - )?; - } - // this is the len field in the AccountInfo::data slice - *caller_account.ref_to_len_in_vm = post_len as u64; - - // this is the len field in the serialized parameters - let serialized_len_ptr = translate_type_mut::( + translate_signers_c( + program_id, + signers_seeds_addr, + signers_seeds_len, memory_mapping, - caller_account - .vm_data_addr - .saturating_sub(std::mem::size_of::() as u64), check_aligned, - )?; - *serialized_len_ptr = post_len as u64; - } - - if !(stricter_abi_and_runtime_constraints && invoke_context.account_data_direct_mapping) { - // Propagate changes in the callee up to the caller. - let to_slice = &mut caller_account.serialized_data; - let from_slice = callee_account - .get_data() - .get(0..post_len) - .ok_or(SyscallError::InvalidLength)?; - if to_slice.len() != from_slice.len() { - return Err(Box::new(InstructionError::AccountDataTooSmall)); - } - to_slice.copy_from_slice(from_slice); - } - - Ok(()) -} - -#[allow(clippy::indexing_slicing)] -#[allow(clippy::arithmetic_side_effects)] -#[cfg(test)] -mod tests { - use { - super::*, - assert_matches::assert_matches, - solana_account::{Account, AccountSharedData, ReadableAccount}, - solana_clock::Epoch, - solana_instruction::Instruction, - solana_program_runtime::{ - invoke_context::{BpfAllocator, SerializedAccountMetadata, SyscallContext}, - with_mock_invoke_context_with_feature_set, - }, - solana_sbpf::{ - ebpf::MM_INPUT_START, memory_region::MemoryRegion, program::SBPFVersion, vm::Config, - }, - solana_sdk_ids::system_program, - solana_transaction_context::{InstructionAccount, TransactionAccount}, - std::{ - cell::{Cell, RefCell}, - mem, ptr, - rc::Rc, - slice, - }, - test_case::test_matrix, - }; - - macro_rules! 
mock_invoke_context { - ($invoke_context:ident, - $transaction_context:ident, - $instruction_data:expr, - $transaction_accounts:expr, - $program_accounts:expr, - $instruction_accounts:expr) => { - let instruction_data = $instruction_data; - let instruction_accounts = $instruction_accounts - .iter() - .enumerate() - .map(|(index_in_callee, index_in_transaction)| { - InstructionAccount::new( - *index_in_transaction as IndexOfAccount, - index_in_callee as IndexOfAccount, - false, - $transaction_accounts[*index_in_transaction as usize].2, - ) - }) - .collect::>(); - let transaction_accounts = $transaction_accounts - .into_iter() - .map(|a| (a.0, a.1)) - .collect::>(); - let mut feature_set = SVMFeatureSet::all_enabled(); - feature_set.stricter_abi_and_runtime_constraints = false; - let feature_set = &feature_set; - with_mock_invoke_context_with_feature_set!( - $invoke_context, - $transaction_context, - feature_set, - transaction_accounts - ); - $invoke_context - .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure($program_accounts, instruction_accounts, instruction_data); - $invoke_context.push().unwrap(); - }; - } - - macro_rules! 
borrow_instruction_account { - ($invoke_context:expr, $index:expr) => {{ - let instruction_context = $invoke_context - .transaction_context - .get_current_instruction_context() - .unwrap(); - instruction_context - .try_borrow_instruction_account($invoke_context.transaction_context, $index) - .unwrap() - }}; - } - - #[test] - fn test_translate_instruction() { - let transaction_accounts = - transaction_with_one_writable_instruction_account(b"foo".to_vec()); - mock_invoke_context!( - invoke_context, - transaction_context, - b"instruction data", - transaction_accounts, - vec![0], - &[1] - ); - - let program_id = Pubkey::new_unique(); - let accounts = vec![AccountMeta { - pubkey: Pubkey::new_unique(), - is_signer: true, - is_writable: false, - }]; - let data = b"ins data".to_vec(); - let vm_addr = MM_INPUT_START; - let (_mem, region) = MockInstruction { - program_id, - accounts: accounts.clone(), - data: data.clone(), - } - .into_region(vm_addr); - - let config = Config { - aligned_memory_mapping: false, - ..Config::default() - }; - let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); - - let ins = SyscallInvokeSignedRust::translate_instruction( - vm_addr, - &memory_mapping, - &mut invoke_context, - true, // check_aligned - ) - .unwrap(); - assert_eq!(ins.program_id, program_id); - assert_eq!(ins.accounts, accounts); - assert_eq!(ins.data, data); - } - - #[test] - fn test_translate_signers() { - let transaction_accounts = - transaction_with_one_writable_instruction_account(b"foo".to_vec()); - mock_invoke_context!( - invoke_context, - transaction_context, - b"instruction data", - transaction_accounts, - vec![0], - &[1] - ); - - let program_id = Pubkey::new_unique(); - let (derived_key, bump_seed) = Pubkey::find_program_address(&[b"foo"], &program_id); - - let vm_addr = MM_INPUT_START; - let (_mem, region) = mock_signers(&[b"foo", &[bump_seed]], vm_addr); - - let config = Config { - aligned_memory_mapping: false, - ..Config::default() 
- }; - let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); - - let signers = SyscallInvokeSignedRust::translate_signers( - &program_id, - vm_addr, - 1, - &memory_mapping, - true, // check_aligned - ) - .unwrap(); - assert_eq!(signers[0], derived_key); - } - - #[test] - fn test_caller_account_from_account_info() { - let transaction_accounts = - transaction_with_one_writable_instruction_account(b"foo".to_vec()); - let account = transaction_accounts[1].1.clone(); - mock_invoke_context!( - invoke_context, - transaction_context, - b"instruction data", - transaction_accounts, - vec![0], - &[1] - ); - - let key = Pubkey::new_unique(); - let vm_addr = MM_INPUT_START; - let (_mem, region, account_metadata) = - MockAccountInfo::new(key, &account).into_region(vm_addr); - - let config = Config { - aligned_memory_mapping: false, - ..Config::default() - }; - let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); - - let account_info = translate_type::(&memory_mapping, vm_addr, false).unwrap(); - - let caller_account = CallerAccount::from_account_info( - &invoke_context, - &memory_mapping, - true, // check_aligned - vm_addr, - account_info, - &account_metadata, - ) - .unwrap(); - assert_eq!(*caller_account.lamports, account.lamports()); - assert_eq!(caller_account.owner, account.owner()); - assert_eq!(caller_account.original_data_len, account.data().len()); - assert_eq!( - *caller_account.ref_to_len_in_vm as usize, - account.data().len() - ); - assert_eq!(caller_account.serialized_data, account.data()); - } - - #[test_matrix([false, true])] - fn test_update_caller_account_lamports_owner(stricter_abi_and_runtime_constraints: bool) { - let transaction_accounts = transaction_with_one_writable_instruction_account(vec![]); - let account = transaction_accounts[1].1.clone(); - mock_invoke_context!( - invoke_context, - transaction_context, - b"instruction data", - transaction_accounts, - vec![0], - &[1] - ); - - let 
mut mock_caller_account = - MockCallerAccount::new(1234, *account.owner(), account.data(), false); - - let config = Config { - aligned_memory_mapping: false, - ..Config::default() - }; - let memory_mapping = MemoryMapping::new( - mock_caller_account.regions.split_off(0), - &config, - SBPFVersion::V3, - ) - .unwrap(); - - let mut caller_account = mock_caller_account.caller_account(); - - let mut callee_account = borrow_instruction_account!(invoke_context, 0); - - callee_account.set_lamports(42).unwrap(); - callee_account - .set_owner(Pubkey::new_unique().as_ref()) - .unwrap(); - - update_caller_account( - &invoke_context, - &memory_mapping, - true, // check_aligned - &mut caller_account, - &mut callee_account, - stricter_abi_and_runtime_constraints, - ) - .unwrap(); - - assert_eq!(*caller_account.lamports, 42); - assert_eq!(caller_account.owner, callee_account.get_owner()); - } - - #[test] - fn test_update_caller_account_data() { - let transaction_accounts = - transaction_with_one_writable_instruction_account(b"foobar".to_vec()); - let account = transaction_accounts[1].1.clone(); - let original_data_len = account.data().len(); - - mock_invoke_context!( - invoke_context, - transaction_context, - b"instruction data", - transaction_accounts, - vec![0], - &[1] - ); - - let mut mock_caller_account = - MockCallerAccount::new(account.lamports(), *account.owner(), account.data(), false); - - let config = Config { - aligned_memory_mapping: false, - ..Config::default() - }; - let memory_mapping = MemoryMapping::new( - mock_caller_account.regions.clone(), - &config, - SBPFVersion::V3, - ) - .unwrap(); - - let data_slice = mock_caller_account.data_slice(); - let len_ptr = unsafe { - data_slice - .as_ptr() - .offset(-(mem::size_of::() as isize)) - }; - let serialized_len = || unsafe { *len_ptr.cast::() as usize }; - let mut caller_account = mock_caller_account.caller_account(); - - let mut callee_account = borrow_instruction_account!(invoke_context, 0); - - for (new_value, 
expected_realloc_size) in [ - (b"foo".to_vec(), MAX_PERMITTED_DATA_INCREASE + 3), - (b"foobaz".to_vec(), MAX_PERMITTED_DATA_INCREASE), - (b"foobazbad".to_vec(), MAX_PERMITTED_DATA_INCREASE - 3), - ] { - assert_eq!(caller_account.serialized_data, callee_account.get_data()); - callee_account.set_data_from_slice(&new_value).unwrap(); - - update_caller_account( - &invoke_context, - &memory_mapping, - true, // check_aligned - &mut caller_account, - &mut callee_account, - false, - ) - .unwrap(); - - let data_len = callee_account.get_data().len(); - assert_eq!(data_len, *caller_account.ref_to_len_in_vm as usize); - assert_eq!(data_len, serialized_len()); - assert_eq!(data_len, caller_account.serialized_data.len()); - assert_eq!( - callee_account.get_data(), - &caller_account.serialized_data[..data_len] - ); - assert_eq!(data_slice[data_len..].len(), expected_realloc_size); - assert!(is_zeroed(&data_slice[data_len..])); - } - - callee_account - .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE) - .unwrap(); - update_caller_account( - &invoke_context, - &memory_mapping, - true, // check_aligned - &mut caller_account, - &mut callee_account, - false, - ) - .unwrap(); - let data_len = callee_account.get_data().len(); - assert_eq!(data_slice[data_len..].len(), 0); - assert!(is_zeroed(&data_slice[data_len..])); - - callee_account - .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE + 1) - .unwrap(); - assert_matches!( - update_caller_account( - &invoke_context, - &memory_mapping, - true, // check_aligned - &mut caller_account, - &mut callee_account, - false, - ), - Err(error) if error.downcast_ref::().unwrap() == &InstructionError::InvalidRealloc - ); - - // close the account - callee_account.set_data_length(0).unwrap(); - callee_account - .set_owner(system_program::id().as_ref()) - .unwrap(); - update_caller_account( - &invoke_context, - &memory_mapping, - true, // check_aligned - &mut caller_account, - &mut callee_account, - false, - ) - .unwrap(); 
- let data_len = callee_account.get_data().len(); - assert_eq!(data_len, 0); - } - - #[test_matrix([false, true])] - fn test_update_callee_account_lamports_owner(stricter_abi_and_runtime_constraints: bool) { - let transaction_accounts = transaction_with_one_writable_instruction_account(vec![]); - let account = transaction_accounts[1].1.clone(); - - mock_invoke_context!( - invoke_context, - transaction_context, - b"instruction data", - transaction_accounts, - vec![0], - &[1] - ); - - let mut mock_caller_account = - MockCallerAccount::new(1234, *account.owner(), account.data(), false); - - let caller_account = mock_caller_account.caller_account(); - - let callee_account = borrow_instruction_account!(invoke_context, 0); - - *caller_account.lamports = 42; - *caller_account.owner = Pubkey::new_unique(); - - update_callee_account( - true, // check_aligned - &caller_account, - callee_account, - stricter_abi_and_runtime_constraints, - true, // account_data_direct_mapping ) - .unwrap(); - - let callee_account = borrow_instruction_account!(invoke_context, 0); - assert_eq!(callee_account.get_lamports(), 42); - assert_eq!(caller_account.owner, callee_account.get_owner()); - } - - #[test_matrix([false, true])] - fn test_update_callee_account_data_writable(stricter_abi_and_runtime_constraints: bool) { - let transaction_accounts = - transaction_with_one_writable_instruction_account(b"foobar".to_vec()); - let account = transaction_accounts[1].1.clone(); - - mock_invoke_context!( - invoke_context, - transaction_context, - b"instruction data", - transaction_accounts, - vec![0], - &[1] - ); - - let mut mock_caller_account = - MockCallerAccount::new(1234, *account.owner(), account.data(), false); - - let mut caller_account = mock_caller_account.caller_account(); - let callee_account = borrow_instruction_account!(invoke_context, 0); - - // stricter_abi_and_runtime_constraints does not copy data in update_callee_account() - caller_account.serialized_data[0] = b'b'; - 
update_callee_account( - true, // check_aligned - &caller_account, - callee_account, - false, // stricter_abi_and_runtime_constraints - false, // account_data_direct_mapping - ) - .unwrap(); - let callee_account = borrow_instruction_account!(invoke_context, 0); - assert_eq!(callee_account.get_data(), b"boobar"); - - // growing resize - let mut data = b"foobarbaz".to_vec(); - *caller_account.ref_to_len_in_vm = data.len() as u64; - caller_account.serialized_data = &mut data; - assert_eq!( - update_callee_account( - true, // check_aligned - &caller_account, - callee_account, - stricter_abi_and_runtime_constraints, - true, // account_data_direct_mapping - ) - .unwrap(), - stricter_abi_and_runtime_constraints, - ); - - // truncating resize - let mut data = b"baz".to_vec(); - *caller_account.ref_to_len_in_vm = data.len() as u64; - caller_account.serialized_data = &mut data; - let callee_account = borrow_instruction_account!(invoke_context, 0); - assert_eq!( - update_callee_account( - true, // check_aligned - &caller_account, - callee_account, - stricter_abi_and_runtime_constraints, - true, // account_data_direct_mapping - ) - .unwrap(), - stricter_abi_and_runtime_constraints, - ); - - // close the account - let mut data = Vec::new(); - caller_account.serialized_data = &mut data; - *caller_account.ref_to_len_in_vm = 0; - let mut owner = system_program::id(); - caller_account.owner = &mut owner; - let callee_account = borrow_instruction_account!(invoke_context, 0); - update_callee_account( - true, // check_aligned - &caller_account, - callee_account, - stricter_abi_and_runtime_constraints, - true, // account_data_direct_mapping - ) - .unwrap(); - let callee_account = borrow_instruction_account!(invoke_context, 0); - assert_eq!(callee_account.get_data(), b""); - - // growing beyond address_space_reserved_for_account - *caller_account.ref_to_len_in_vm = (7 + MAX_PERMITTED_DATA_INCREASE) as u64; - let result = update_callee_account( - true, // check_aligned - &caller_account, 
- callee_account, - stricter_abi_and_runtime_constraints, - true, // account_data_direct_mapping - ); - if stricter_abi_and_runtime_constraints { - assert_matches!( - result, - Err(error) if error.downcast_ref::().unwrap() == &InstructionError::InvalidRealloc - ); - } else { - result.unwrap(); - } - } - - #[test_matrix([false, true])] - fn test_update_callee_account_data_readonly(stricter_abi_and_runtime_constraints: bool) { - let transaction_accounts = - transaction_with_one_readonly_instruction_account(b"foobar".to_vec()); - let account = transaction_accounts[1].1.clone(); - - mock_invoke_context!( - invoke_context, - transaction_context, - b"instruction data", - transaction_accounts, - vec![0], - &[1] - ); - - let mut mock_caller_account = - MockCallerAccount::new(1234, *account.owner(), account.data(), false); - let mut caller_account = mock_caller_account.caller_account(); - let callee_account = borrow_instruction_account!(invoke_context, 0); - - // stricter_abi_and_runtime_constraints does not copy data in update_callee_account() - caller_account.serialized_data[0] = b'b'; - assert_matches!( - update_callee_account( - true, // check_aligned - &caller_account, - callee_account, - false, // stricter_abi_and_runtime_constraints - false, // account_data_direct_mapping - ), - Err(error) if error.downcast_ref::().unwrap() == &InstructionError::ExternalAccountDataModified - ); - - // growing resize - let mut data = b"foobarbaz".to_vec(); - *caller_account.ref_to_len_in_vm = data.len() as u64; - caller_account.serialized_data = &mut data; - let callee_account = borrow_instruction_account!(invoke_context, 0); - assert_matches!( - update_callee_account( - true, // check_aligned - &caller_account, - callee_account, - stricter_abi_and_runtime_constraints, - true, // account_data_direct_mapping - ), - Err(error) if error.downcast_ref::().unwrap() == &InstructionError::AccountDataSizeChanged - ); - - // truncating resize - let mut data = b"baz".to_vec(); - 
*caller_account.ref_to_len_in_vm = data.len() as u64; - caller_account.serialized_data = &mut data; - let callee_account = borrow_instruction_account!(invoke_context, 0); - assert_matches!( - update_callee_account( - true, // check_aligned - &caller_account, - callee_account, - stricter_abi_and_runtime_constraints, - true, // account_data_direct_mapping - ), - Err(error) if error.downcast_ref::().unwrap() == &InstructionError::AccountDataSizeChanged - ); - } - - #[test] - fn test_translate_accounts_rust() { - let transaction_accounts = - transaction_with_one_writable_instruction_account(b"foobar".to_vec()); - let account = transaction_accounts[1].1.clone(); - let key = transaction_accounts[1].0; - let original_data_len = account.data().len(); - - let vm_addr = MM_INPUT_START; - let (_mem, region, account_metadata) = - MockAccountInfo::new(key, &account).into_region(vm_addr); - - let config = Config { - aligned_memory_mapping: false, - ..Config::default() - }; - let memory_mapping = MemoryMapping::new(vec![region], &config, SBPFVersion::V3).unwrap(); - - mock_invoke_context!( - invoke_context, - transaction_context, - b"instruction data", - transaction_accounts, - vec![0], - &[1, 1] - ); - - invoke_context - .set_syscall_context(SyscallContext { - allocator: BpfAllocator::new(solana_program_entrypoint::HEAP_LENGTH as u64), - accounts_metadata: vec![account_metadata], - trace_log: Vec::new(), - }) - .unwrap(); - - invoke_context - .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure( - vec![0], - vec![ - InstructionAccount::new(1, 0, false, true), - InstructionAccount::new(1, 0, false, true), - ], - &[], - ); - let accounts = SyscallInvokeSignedRust::translate_accounts( - vm_addr, - 1, - &memory_mapping, - &mut invoke_context, - true, // check_aligned - ) - .unwrap(); - assert_eq!(accounts.len(), 1); - let caller_account = &accounts[0].caller_account; - assert_eq!(caller_account.serialized_data, account.data()); - 
assert_eq!(caller_account.original_data_len, original_data_len); - } - - type TestTransactionAccount = (Pubkey, AccountSharedData, bool); - struct MockCallerAccount { - lamports: u64, - owner: Pubkey, - vm_addr: u64, - data: Vec, - len: u64, - regions: Vec, - stricter_abi_and_runtime_constraints: bool, - } - - impl MockCallerAccount { - fn new( - lamports: u64, - owner: Pubkey, - data: &[u8], - stricter_abi_and_runtime_constraints: bool, - ) -> MockCallerAccount { - let vm_addr = MM_INPUT_START; - let mut region_addr = vm_addr; - let region_len = mem::size_of::() - + if stricter_abi_and_runtime_constraints { - 0 - } else { - data.len() + MAX_PERMITTED_DATA_INCREASE - }; - let mut d = vec![0; region_len]; - let mut regions = vec![]; - - // always write the [len] part even when stricter_abi_and_runtime_constraints - unsafe { ptr::write_unaligned::(d.as_mut_ptr().cast(), data.len() as u64) }; - - // write the account data when not stricter_abi_and_runtime_constraints - if !stricter_abi_and_runtime_constraints { - d[mem::size_of::()..][..data.len()].copy_from_slice(data); - } - - // create a region for [len][data+realloc if !stricter_abi_and_runtime_constraints] - regions.push(MemoryRegion::new_writable(&mut d[..region_len], vm_addr)); - region_addr += region_len as u64; - - if stricter_abi_and_runtime_constraints { - // create a region for the directly mapped data - regions.push(MemoryRegion::new_readonly(data, region_addr)); - region_addr += data.len() as u64; - - // create a region for the realloc padding - regions.push(MemoryRegion::new_writable( - &mut d[mem::size_of::()..], - region_addr, - )); - } else { - // caller_account.serialized_data must have the actual data length - d.truncate(mem::size_of::() + data.len()); - } - - MockCallerAccount { - lamports, - owner, - vm_addr, - data: d, - len: data.len() as u64, - regions, - stricter_abi_and_runtime_constraints, - } - } - - fn data_slice<'a>(&self) -> &'a [u8] { - // lifetime crimes - unsafe { - 
slice::from_raw_parts( - self.data[mem::size_of::()..].as_ptr(), - self.data.capacity() - mem::size_of::(), - ) - } - } - - fn caller_account(&mut self) -> CallerAccount { - let data = if self.stricter_abi_and_runtime_constraints { - &mut [] - } else { - &mut self.data[mem::size_of::()..] - }; - CallerAccount { - lamports: &mut self.lamports, - owner: &mut self.owner, - original_data_len: self.len as usize, - serialized_data: data, - vm_data_addr: self.vm_addr + mem::size_of::() as u64, - ref_to_len_in_vm: &mut self.len, - } - } - } - - fn transaction_with_one_writable_instruction_account( - data: Vec, - ) -> Vec { - let program_id = Pubkey::new_unique(); - let account = AccountSharedData::from(Account { - lamports: 1, - data, - owner: program_id, - executable: false, - rent_epoch: 100, - }); - vec![ - ( - program_id, - AccountSharedData::from(Account { - lamports: 0, - data: vec![], - owner: bpf_loader::id(), - executable: true, - rent_epoch: 0, - }), - false, - ), - (Pubkey::new_unique(), account, true), - ] - } - - fn transaction_with_one_readonly_instruction_account( - data: Vec, - ) -> Vec { - let program_id = Pubkey::new_unique(); - let account_owner = Pubkey::new_unique(); - let account = AccountSharedData::from(Account { - lamports: 1, - data, - owner: account_owner, - executable: false, - rent_epoch: 100, - }); - vec![ - ( - program_id, - AccountSharedData::from(Account { - lamports: 0, - data: vec![], - owner: bpf_loader::id(), - executable: true, - rent_epoch: 0, - }), - false, - ), - (Pubkey::new_unique(), account, true), - ] - } - - struct MockInstruction { - program_id: Pubkey, - accounts: Vec, - data: Vec, - } - - impl MockInstruction { - fn into_region(self, vm_addr: u64) -> (Vec, MemoryRegion) { - let accounts_len = mem::size_of::() * self.accounts.len(); - - let size = mem::size_of::() + accounts_len + self.data.len(); - - let mut data = vec![0; size]; - - let vm_addr = vm_addr as usize; - let accounts_addr = vm_addr + mem::size_of::(); - let 
data_addr = accounts_addr + accounts_len; - - let ins = Instruction { - program_id: self.program_id, - accounts: unsafe { - Vec::from_raw_parts( - accounts_addr as *mut _, - self.accounts.len(), - self.accounts.len(), - ) - }, - data: unsafe { - Vec::from_raw_parts(data_addr as *mut _, self.data.len(), self.data.len()) - }, - }; - let ins = StableInstruction::from(ins); - - unsafe { - ptr::write_unaligned(data.as_mut_ptr().cast(), ins); - data[accounts_addr - vm_addr..][..accounts_len].copy_from_slice( - slice::from_raw_parts(self.accounts.as_ptr().cast(), accounts_len), - ); - data[data_addr - vm_addr..].copy_from_slice(&self.data); - } - - let region = MemoryRegion::new_writable(data.as_mut_slice(), vm_addr as u64); - (data, region) - } - } - - fn mock_signers(signers: &[&[u8]], vm_addr: u64) -> (Vec, MemoryRegion) { - let vm_addr = vm_addr as usize; - - // calculate size - let fat_ptr_size_of_slice = mem::size_of::<&[()]>(); // pointer size + length size - let singers_length = signers.len(); - let sum_signers_data_length: usize = signers.iter().map(|s| s.len()).sum(); - - // init data vec - let total_size = fat_ptr_size_of_slice - + singers_length * fat_ptr_size_of_slice - + sum_signers_data_length; - let mut data = vec![0; total_size]; - - // data is composed by 3 parts - // A. - // [ singers address, singers length, ..., - // B. | - // signer1 address, signer1 length, signer2 address ..., - // ^ p1 ---> - // C. | - // signer1 data, signer2 data, ... ] - // ^ p2 ---> - - // A. - data[..fat_ptr_size_of_slice / 2] - .clone_from_slice(&(fat_ptr_size_of_slice + vm_addr).to_le_bytes()); - data[fat_ptr_size_of_slice / 2..fat_ptr_size_of_slice] - .clone_from_slice(&(singers_length).to_le_bytes()); - - // B. + C. - let (mut p1, mut p2) = ( - fat_ptr_size_of_slice, - fat_ptr_size_of_slice + singers_length * fat_ptr_size_of_slice, - ); - for signer in signers.iter() { - let signer_length = signer.len(); - - // B. 
- data[p1..p1 + fat_ptr_size_of_slice / 2] - .clone_from_slice(&(p2 + vm_addr).to_le_bytes()); - data[p1 + fat_ptr_size_of_slice / 2..p1 + fat_ptr_size_of_slice] - .clone_from_slice(&(signer_length).to_le_bytes()); - p1 += fat_ptr_size_of_slice; - - // C. - data[p2..p2 + signer_length].clone_from_slice(signer); - p2 += signer_length; - } - - let region = MemoryRegion::new_writable(data.as_mut_slice(), vm_addr as u64); - (data, region) - } - - struct MockAccountInfo<'a> { - key: Pubkey, - is_signer: bool, - is_writable: bool, - lamports: u64, - data: &'a [u8], - owner: Pubkey, - executable: bool, - rent_epoch: Epoch, - } - - impl MockAccountInfo<'_> { - fn new(key: Pubkey, account: &AccountSharedData) -> MockAccountInfo { - MockAccountInfo { - key, - is_signer: false, - is_writable: false, - lamports: account.lamports(), - data: account.data(), - owner: *account.owner(), - executable: account.executable(), - rent_epoch: account.rent_epoch(), - } - } - - fn into_region(self, vm_addr: u64) -> (Vec, MemoryRegion, SerializedAccountMetadata) { - let size = mem::size_of::() - + mem::size_of::() * 2 - + mem::size_of::>>() - + mem::size_of::() - + mem::size_of::>>() - + self.data.len(); - let mut data = vec![0; size]; - - let vm_addr = vm_addr as usize; - let key_addr = vm_addr + mem::size_of::(); - let lamports_cell_addr = key_addr + mem::size_of::(); - let lamports_addr = lamports_cell_addr + mem::size_of::>>(); - let owner_addr = lamports_addr + mem::size_of::(); - let data_cell_addr = owner_addr + mem::size_of::(); - let data_addr = data_cell_addr + mem::size_of::>>(); - - let info = AccountInfo { - key: unsafe { (key_addr as *const Pubkey).as_ref() }.unwrap(), - is_signer: self.is_signer, - is_writable: self.is_writable, - lamports: unsafe { - Rc::from_raw((lamports_cell_addr + RcBox::<&mut u64>::VALUE_OFFSET) as *const _) - }, - data: unsafe { - Rc::from_raw((data_cell_addr + RcBox::<&mut [u8]>::VALUE_OFFSET) as *const _) - }, - owner: unsafe { (owner_addr as *const 
Pubkey).as_ref() }.unwrap(), - executable: self.executable, - rent_epoch: self.rent_epoch, - }; - - unsafe { - ptr::write_unaligned(data.as_mut_ptr().cast(), info); - ptr::write_unaligned( - (data.as_mut_ptr() as usize + key_addr - vm_addr) as *mut _, - self.key, - ); - ptr::write_unaligned( - (data.as_mut_ptr() as usize + lamports_cell_addr - vm_addr) as *mut _, - RcBox::new(RefCell::new((lamports_addr as *mut u64).as_mut().unwrap())), - ); - ptr::write_unaligned( - (data.as_mut_ptr() as usize + lamports_addr - vm_addr) as *mut _, - self.lamports, - ); - ptr::write_unaligned( - (data.as_mut_ptr() as usize + owner_addr - vm_addr) as *mut _, - self.owner, - ); - ptr::write_unaligned( - (data.as_mut_ptr() as usize + data_cell_addr - vm_addr) as *mut _, - RcBox::new(RefCell::new(slice::from_raw_parts_mut( - data_addr as *mut u8, - self.data.len(), - ))), - ); - data[data_addr - vm_addr..].copy_from_slice(self.data); - } - - let region = MemoryRegion::new_writable(data.as_mut_slice(), vm_addr as u64); - ( - data, - region, - SerializedAccountMetadata { - original_data_len: self.data.len(), - vm_key_addr: key_addr as u64, - vm_lamports_addr: lamports_addr as u64, - vm_owner_addr: owner_addr as u64, - vm_data_addr: data_addr as u64, - }, - ) - } - } - - #[repr(C)] - struct RcBox { - strong: Cell, - weak: Cell, - value: T, - } - - impl RcBox { - const VALUE_OFFSET: usize = mem::size_of::>() * 2; - fn new(value: T) -> RcBox { - RcBox { - strong: Cell::new(0), - weak: Cell::new(0), - value, - } - } - } - - fn is_zeroed(data: &[u8]) -> bool { - data.iter().all(|b| *b == 0) } } diff --git a/syscalls/src/lib.rs b/syscalls/src/lib.rs index 91640df50477ec..9f223d9fdce945 100644 --- a/syscalls/src/lib.rs +++ b/syscalls/src/lib.rs @@ -10,29 +10,28 @@ pub use self::{ SyscallGetSysvar, }, }; +use solana_program_runtime::memory::translate_vm_slice; #[allow(deprecated)] use { crate::mem_ops::is_nonoverlapping, - solana_account_info::AccountInfo, solana_big_mod_exp::{big_mod_exp, 
BigModExpParams}, solana_blake3_hasher as blake3, solana_bn254::prelude::{ - alt_bn128_addition, alt_bn128_multiplication, alt_bn128_multiplication_128, - alt_bn128_pairing, AltBn128Error, ALT_BN128_ADDITION_OUTPUT_LEN, - ALT_BN128_MULTIPLICATION_OUTPUT_LEN, ALT_BN128_PAIRING_ELEMENT_LEN, - ALT_BN128_PAIRING_OUTPUT_LEN, + alt_bn128_addition, alt_bn128_multiplication, alt_bn128_pairing, + ALT_BN128_ADDITION_OUTPUT_LEN, ALT_BN128_MULTIPLICATION_OUTPUT_LEN, + ALT_BN128_PAIRING_ELEMENT_LEN, ALT_BN128_PAIRING_OUTPUT_LEN, }, solana_cpi::MAX_RETURN_DATA, solana_hash::Hash, solana_instruction::{error::InstructionError, AccountMeta, ProcessedSiblingInstruction}, - solana_keccak_hasher as keccak, - solana_log_collector::{ic_logger_msg, ic_msg}, - solana_poseidon as poseidon, - solana_program_entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, SUCCESS}, + solana_keccak_hasher as keccak, solana_poseidon as poseidon, + solana_program_entrypoint::{BPF_ALIGN_OF_U128, SUCCESS}, solana_program_runtime::{ + cpi::CpiError, execution_budget::{SVMTransactionExecutionBudget, SVMTransactionExecutionCost}, invoke_context::InvokeContext, - stable_log, + memory::MemoryTranslationError, + stable_log, translate_inner, translate_slice_inner, translate_type_inner, }, solana_pubkey::{Pubkey, PubkeyError, MAX_SEEDS, MAX_SEED_LEN, PUBKEY_BYTES}, solana_sbpf::{ @@ -41,20 +40,17 @@ use { program::{BuiltinProgram, SBPFVersion}, vm::Config, }, - solana_sdk_ids::{bpf_loader, bpf_loader_deprecated, native_loader}, solana_secp256k1_recover::{ Secp256k1RecoverError, SECP256K1_PUBLIC_KEY_LENGTH, SECP256K1_SIGNATURE_LENGTH, }, solana_sha256_hasher::Hasher, solana_svm_feature_set::SVMFeatureSet, - solana_sysvar::Sysvar, - solana_sysvar_id::SysvarId, - solana_timings::ExecuteTimings, - solana_transaction_context::IndexOfAccount, - solana_type_overrides::sync::Arc, + solana_svm_log_collector::{ic_logger_msg, ic_msg}, + solana_svm_type_overrides::sync::Arc, + solana_sysvar::SysvarSerialize, + 
solana_transaction_context::vm_slice::VmSlice, std::{ alloc::Layout, - marker::PhantomData, mem::{align_of, size_of}, slice::from_raw_parts_mut, str::{from_utf8, Utf8Error}, @@ -67,9 +63,6 @@ mod logging; mod mem_ops; mod sysvar; -/// Maximum signers -const MAX_SIGNERS: usize = 16; - /// Error definitions #[derive(Debug, ThisError, PartialEq, Eq)] pub enum SyscallError { @@ -126,6 +119,48 @@ pub enum SyscallError { ArithmeticOverflow, } +impl From for SyscallError { + fn from(error: MemoryTranslationError) -> Self { + match error { + MemoryTranslationError::UnalignedPointer => SyscallError::UnalignedPointer, + MemoryTranslationError::InvalidLength => SyscallError::InvalidLength, + } + } +} + +impl From for SyscallError { + fn from(error: CpiError) -> Self { + match error { + CpiError::InvalidPointer => SyscallError::InvalidPointer, + CpiError::TooManySigners => SyscallError::TooManySigners, + CpiError::BadSeeds(e) => SyscallError::BadSeeds(e), + CpiError::InvalidLength => SyscallError::InvalidLength, + CpiError::MaxInstructionAccountsExceeded { + num_accounts, + max_accounts, + } => SyscallError::MaxInstructionAccountsExceeded { + num_accounts, + max_accounts, + }, + CpiError::MaxInstructionDataLenExceeded { + data_len, + max_data_len, + } => SyscallError::MaxInstructionDataLenExceeded { + data_len, + max_data_len, + }, + CpiError::MaxInstructionAccountInfosExceeded { + num_account_infos, + max_account_infos, + } => SyscallError::MaxInstructionAccountInfosExceeded { + num_account_infos, + max_account_infos, + }, + CpiError::ProgramNotSupported(pubkey) => SyscallError::ProgramNotSupported(pubkey), + } + } +} + type Error = Box; trait HasherImpl { @@ -225,60 +260,6 @@ impl HasherImpl for Keccak256Hasher { } } -// The VmSlice class is used for cases when you need a slice that is stored in the BPF -// interpreter's virtual address space. 
Because this source code can be compiled with -// addresses of different bit depths, we cannot assume that the 64-bit BPF interpreter's -// pointer sizes can be mapped to physical pointer sizes. In particular, if you need a -// slice-of-slices in the virtual space, the inner slices will be different sizes in a -// 32-bit app build than in the 64-bit virtual space. Therefore instead of a slice-of-slices, -// you should implement a slice-of-VmSlices, which can then use VmSlice::translate() to -// map to the physical address. -// This class must consist only of 16 bytes: a u64 ptr and a u64 len, to match the 64-bit -// implementation of a slice in Rust. The PhantomData entry takes up 0 bytes. - -#[repr(C)] -pub struct VmSlice { - ptr: u64, - len: u64, - resource_type: PhantomData, -} - -impl VmSlice { - pub fn new(ptr: u64, len: u64) -> Self { - VmSlice { - ptr, - len, - resource_type: PhantomData, - } - } - - pub fn ptr(&self) -> u64 { - self.ptr - } - pub fn len(&self) -> u64 { - self.len - } - - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Adjust the length of the vector. This is unchecked, and it assumes that the pointer - /// points to valid memory of the correct length after vm-translation. 
- pub fn resize(&mut self, len: u64) { - self.len = len; - } - - /// Returns a slice using a mapped physical address - pub fn translate<'a>( - &self, - memory_mapping: &'a MemoryMapping, - check_aligned: bool, - ) -> Result<&'a [T], Error> { - translate_slice::(memory_mapping, self.ptr, self.len, check_aligned) - } -} - fn consume_compute_meter(invoke_context: &InvokeContext, amount: u64) -> Result<(), Error> { invoke_context.consume_checked(amount)?; Ok(()) @@ -552,63 +533,6 @@ pub fn create_program_runtime_environment_v2<'a>( BuiltinProgram::new_loader(config) } -fn address_is_aligned(address: u64) -> bool { - (address as *mut T as usize) - .checked_rem(align_of::()) - .map(|rem| rem == 0) - .expect("T to be non-zero aligned") -} - -// Do not use this directly -#[macro_export] -macro_rules! translate_inner { - ($memory_mapping:expr, $map:ident, $access_type:expr, $vm_addr:expr, $len:expr $(,)?) => { - Result::::from( - $memory_mapping - .$map($access_type, $vm_addr, $len) - .map_err(|err| err.into()), - ) - }; -} -// Do not use this directly -#[macro_export] -macro_rules! translate_type_inner { - ($memory_mapping:expr, $access_type:expr, $vm_addr:expr, $T:ty, $check_aligned:expr $(,)?) => {{ - let host_addr = translate_inner!( - $memory_mapping, - map, - $access_type, - $vm_addr, - size_of::<$T>() as u64 - )?; - if !$check_aligned { - Ok(unsafe { std::mem::transmute::(host_addr) }) - } else if !address_is_aligned::<$T>(host_addr) { - Err(SyscallError::UnalignedPointer.into()) - } else { - Ok(unsafe { &mut *(host_addr as *mut $T) }) - } - }}; -} -// Do not use this directly -#[macro_export] -macro_rules! translate_slice_inner { - ($memory_mapping:expr, $access_type:expr, $vm_addr:expr, $len:expr, $T:ty, $check_aligned:expr $(,)?) 
=> {{ - if $len == 0 { - return Ok(&mut []); - } - let total_size = $len.saturating_mul(size_of::<$T>() as u64); - if isize::try_from(total_size).is_err() { - return Err(SyscallError::InvalidLength.into()); - } - let host_addr = translate_inner!($memory_mapping, map, $access_type, $vm_addr, total_size)?; - if $check_aligned && !address_is_aligned::<$T>(host_addr) { - return Err(SyscallError::UnalignedPointer.into()); - } - Ok(unsafe { from_raw_parts_mut(host_addr as *mut $T, $len as usize) }) - }}; -} - fn translate_type<'a, T>( memory_mapping: &'a MemoryMapping, vm_addr: u64, @@ -860,7 +784,7 @@ fn translate_and_check_program_address_inputs<'a>( if untranslated_seed.len() > MAX_SEED_LEN as u64 { return Err(SyscallError::BadSeeds(PubkeyError::MaxSeedLengthExceeded).into()); } - untranslated_seed.translate(memory_mapping, check_aligned) + translate_vm_slice(untranslated_seed, memory_mapping, check_aligned) }) .collect::, Error>>()?; let program_id = translate_type::(memory_mapping, program_id_addr, check_aligned)?; @@ -1462,7 +1386,7 @@ declare_builtin_function!( let program_id = *transaction_context .get_current_instruction_context() .and_then(|instruction_context| { - instruction_context.get_last_program_key(transaction_context) + instruction_context.get_program_key() })?; transaction_context.set_return_data(program_id, return_data)?; @@ -1582,19 +1506,12 @@ declare_builtin_function!( let _ = result_header; *program_id = *instruction_context - .get_last_program_key(invoke_context.transaction_context)?; + .get_program_key()?; data.clone_from_slice(instruction_context.get_instruction_data()); let account_metas = (0..instruction_context.get_number_of_instruction_accounts()) .map(|instruction_account_index| { Ok(AccountMeta { - pubkey: *invoke_context - .transaction_context - .get_key_of_account_at_index( - instruction_context - .get_index_of_instruction_account_in_transaction( - instruction_account_index, - )?, - )?, + pubkey: 
*instruction_context.get_key_of_instruction_account(instruction_account_index)?, is_signer: instruction_context .is_instruction_account_signer(instruction_account_index)?, is_writable: instruction_context @@ -1694,42 +1611,14 @@ declare_builtin_function!( let calculation = match group_op { ALT_BN128_ADD => alt_bn128_addition, - ALT_BN128_MUL => { - let fix_alt_bn128_multiplication_input_length = invoke_context - .get_feature_set() - .fix_alt_bn128_multiplication_input_length; - if fix_alt_bn128_multiplication_input_length { - alt_bn128_multiplication - } else { - alt_bn128_multiplication_128 - } - } + ALT_BN128_MUL => alt_bn128_multiplication, ALT_BN128_PAIRING => alt_bn128_pairing, _ => { return Err(SyscallError::InvalidAttribute.into()); } }; - let simplify_alt_bn128_syscall_error_codes = invoke_context - .get_feature_set() - .simplify_alt_bn128_syscall_error_codes; - - let result_point = match calculation(input) { - Ok(result_point) => result_point, - Err(e) => { - return if simplify_alt_bn128_syscall_error_codes { - Ok(1) - } else { - Ok(e.into()) - }; - } - }; - - // This can never happen and should be removed when the - // simplify_alt_bn128_syscall_error_codes feature gets activated - if result_point.len() != output && !simplify_alt_bn128_syscall_error_codes { - return Ok(AltBn128Error::SliceOutOfBounds.into()); - } + let Ok(result_point) = calculation(input) else { return Ok(1) }; call_result.copy_from_slice(&result_point); Ok(SUCCESS) @@ -1858,7 +1747,9 @@ declare_builtin_function!( )?; let inputs = inputs .iter() - .map(|input| input.translate(memory_mapping, invoke_context.get_check_aligned())) + .map(|input| { + translate_vm_slice(input, memory_mapping, invoke_context.get_check_aligned()) + }) .collect::, Error>>()?; let simplify_alt_bn128_syscall_error_codes = invoke_context @@ -2065,7 +1956,7 @@ declare_builtin_function!( )?; for val in vals.iter() { - let bytes = val.translate(memory_mapping, invoke_context.get_check_aligned())?; + let bytes = 
translate_vm_slice(val, memory_mapping, invoke_context.get_check_aligned())?; let cost = compute_cost.mem_op_base_cost.max( hash_byte_cost.saturating_mul( val.len() @@ -2161,6 +2052,7 @@ mod tests { assert_matches::assert_matches, core::slice, solana_account::{create_account_shared_data_for_test, AccountSharedData}, + solana_account_info::AccountInfo, solana_clock::Clock, solana_epoch_rewards::EpochRewards, solana_epoch_schedule::EpochSchedule, @@ -2172,6 +2064,7 @@ mod tests { solana_program_runtime::{ execution_budget::MAX_HEAP_FRAME_BYTES, invoke_context::{BpfAllocator, InvokeContext, SyscallContext}, + memory::address_is_aligned, with_mock_invoke_context, }, solana_sbpf::{ @@ -2182,12 +2075,15 @@ mod tests { program::SBPFVersion, vm::Config, }, - solana_sdk_ids::{bpf_loader, bpf_loader_upgradeable, sysvar}, + solana_sdk_ids::{ + bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, native_loader, sysvar, + }, solana_sha256_hasher::hashv, solana_slot_hashes::{self as slot_hashes, SlotHashes}, solana_stable_layout::stable_instruction::StableInstruction, - solana_sysvar::stake_history::{self, StakeHistory, StakeHistoryEntry}, - solana_transaction_context::InstructionAccount, + solana_stake_interface::stake_history::{self, StakeHistory, StakeHistoryEntry}, + solana_sysvar_id::SysvarId, + solana_transaction_context::{IndexOfAccount, InstructionAccount}, std::{ hash::{DefaultHasher, Hash, Hasher}, mem, @@ -2221,9 +2117,8 @@ mod tests { with_mock_invoke_context!($invoke_context, transaction_context, transaction_accounts); $invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![0, 1], vec![], &[]); + .configure_next_instruction_for_tests(1, vec![], &[]) + .unwrap(); $invoke_context.push().unwrap(); }; } @@ -4437,26 +4332,28 @@ mod tests { while stack_height <= invoke_context .transaction_context - .get_instruction_context_stack_height() + .get_instruction_stack_height() { 
invoke_context.transaction_context.pop().unwrap(); } if stack_height > invoke_context .transaction_context - .get_instruction_context_stack_height() + .get_instruction_stack_height() { let instruction_accounts = vec![InstructionAccount::new( index_in_trace.saturating_add(1) as IndexOfAccount, - 0, false, false, )]; invoke_context .transaction_context - .get_next_instruction_context_mut() - .unwrap() - .configure(vec![0], instruction_accounts, &[index_in_trace as u8]); + .configure_next_instruction_for_tests( + 0, + instruction_accounts, + &[index_in_trace as u8], + ) + .unwrap(); invoke_context.transaction_context.push().unwrap(); } } diff --git a/syscalls/src/logging.rs b/syscalls/src/logging.rs index fc89cf5e321a8d..3f3b0ef784e44e 100644 --- a/syscalls/src/logging.rs +++ b/syscalls/src/logging.rs @@ -1,4 +1,6 @@ -use {super::*, solana_sbpf::vm::ContextObject}; +use { + super::*, solana_program_runtime::memory::translate_vm_slice, solana_sbpf::vm::ContextObject, +}; declare_builtin_function!( /// Log a user's info message @@ -143,7 +145,7 @@ declare_builtin_function!( let mut fields = Vec::with_capacity(untranslated_fields.len()); for untranslated_field in untranslated_fields { - fields.push(untranslated_field.translate(memory_mapping, invoke_context.get_check_aligned())?); + fields.push(translate_vm_slice(untranslated_field, memory_mapping, invoke_context.get_check_aligned())?); } let log_collector = invoke_context.get_log_collector(); diff --git a/syscalls/src/sysvar.rs b/syscalls/src/sysvar.rs index 13e58aea8fb494..17c03b9006f9dc 100644 --- a/syscalls/src/sysvar.rs +++ b/syscalls/src/sysvar.rs @@ -1,9 +1,9 @@ use { super::*, crate::translate_mut, - solana_program_runtime::execution_budget::SVMTransactionExecutionCost, + solana_program_runtime::execution_budget::SVMTransactionExecutionCost, solana_sbpf::ebpf, }; -fn get_sysvar( +fn get_sysvar( sysvar: Result, InstructionError>, var_addr: u64, check_aligned: bool, @@ -17,13 +17,21 @@ fn get_sysvar( 
.sysvar_base_cost .saturating_add(size_of::() as u64), )?; + + if var_addr >= ebpf::MM_INPUT_START + && invoke_context + .get_feature_set() + .stricter_abi_and_runtime_constraints + { + return Err(SyscallError::InvalidPointer.into()); + } translate_mut!( memory_mapping, check_aligned, let var: &mut T = map(var_addr)?; ); - // this clone looks unecessary now, but it exists to zero out trailing alignment bytes + // this clone looks unnecessary now, but it exists to zero out trailing alignment bytes // it is unclear whether this should ever matter // but there are tests using MemoryMapping that expect to see this // we preserve the previous behavior out of an abundance of caution @@ -203,6 +211,13 @@ declare_builtin_function!( .saturating_add(std::cmp::max(sysvar_buf_cost, mem_op_base_cost)), )?; + if var_addr >= ebpf::MM_INPUT_START + && invoke_context + .get_feature_set() + .stricter_abi_and_runtime_constraints + { + return Err(SyscallError::InvalidPointer.into()); + } // Abort: "Not all bytes in VM memory range `[var_addr, var_addr + length)` are writable." translate_mut!( memory_mapping, diff --git a/system-test/abi-testcases/.gitignore b/system-test/abi-testcases/.gitignore deleted file mode 100644 index 0dd0c9df34bfbc..00000000000000 --- a/system-test/abi-testcases/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/baseline-run.sh -/config/ -/releases/ diff --git a/system-test/abi-testcases/mixed-validator-test.sh b/system-test/abi-testcases/mixed-validator-test.sh deleted file mode 100755 index c0400560dc519e..00000000000000 --- a/system-test/abi-testcases/mixed-validator-test.sh +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/env bash -# -# Basic empirical ABI system test - can validators on all supported versions of -# Solana talk to each other? 
-# - -set -e -cd "$(dirname "$0")" -SOLANA_ROOT="$(cd ../..; pwd)" - -logDir="$PWD"/logs -ledgerDir="$PWD"/config -rm -rf "$ledgerDir" "$logDir" -mkdir -p "$logDir" - -baselineVersion=1.1.18 # <-- oldest version we remain compatible with -otherVersions=( - beta - edge -) - -solanaInstallDataDir=$PWD/releases -solanaInstallGlobalOpts=( - --data-dir "$solanaInstallDataDir" - --config "$solanaInstallDataDir"/config.yml - --no-modify-path -) - -# Install all the solana versions -bootstrapInstall() { - declare v=$1 - if [[ ! -h $solanaInstallDataDir/active_release ]]; then - sh "$SOLANA_ROOT"/install/agave-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}" - fi - export PATH="$solanaInstallDataDir/active_release/bin/:$PATH" -} - -bootstrapInstall "$baselineVersion" -for v in "${otherVersions[@]}"; do - agave-install-init "${solanaInstallGlobalOpts[@]}" "$v" - solana -V -done - - -ORIGINAL_PATH=$PATH -solanaInstallUse() { - declare version=$1 - echo "--- Now using solana $version" - SOLANA_BIN="$solanaInstallDataDir/releases/$version/solana-release/bin" - export PATH="$SOLANA_BIN:$ORIGINAL_PATH" -} - -killSession() { - tmux kill-session -t abi || true -} - -export RUST_BACKTRACE=1 - -# Start up the bootstrap validator using the baseline version -solanaInstallUse "$baselineVersion" -echo "--- Starting $baselineVersion bootstrap validator" -trap 'killSession' INT TERM ERR EXIT -killSession -( - set -x - if [[ ! -x baseline-run.sh ]]; then - curl https://raw.githubusercontent.com/solana-labs/solana/v"$baselineVersion"/run.sh -o baseline-run.sh - chmod +x baseline-run.sh - fi - tmux new -s abi -d " \ - ./baseline-run.sh 2>&1 | tee $logDir/$baselineVersion.log \ - " - - SECONDS= - while [[ ! 
-f config/baseline-run/init-completed ]]; do - sleep 5 - if [[ $SECONDS -gt 60 ]]; then - echo "Error: validator failed to start" - exit 1 - fi - done - - solana --url http://127.0.0.1:8899 show-validators -) - -# Ensure all versions can see the bootstrap validator -for v in "${otherVersions[@]}"; do - solanaInstallUse "$v" - echo "--- Looking for bootstrap validator on gossip" - ( - set -x - "$SOLANA_BIN"/solana-gossip spy \ - --entrypoint 127.0.0.1:8001 \ - --num-nodes-exactly 1 \ - --timeout 30 - ) - echo Ok -done - -# Start a validator for each version and look for it -# -# Once https://github.com/solana-labs/solana/issues/7738 is resolved, remove -# `--no-snapshot-fetch` when starting the validators -# -nodeCount=1 -for v in "${otherVersions[@]}"; do - nodeCount=$((nodeCount + 1)) - solanaInstallUse "$v" - # start another validator - ledger="$ledgerDir"/ledger-"$v" - rm -rf "$ledger" - echo "--- Looking for $nodeCount validators on gossip" - ( - set -x - tmux new-window -t abi -n "$v" " \ - $SOLANA_BIN/agave-validator \ - --ledger $ledger \ - --no-snapshot-fetch \ - --entrypoint 127.0.0.1:8001 \ - -o - 2>&1 | tee $logDir/$v.log \ - " - "$SOLANA_BIN"/solana-gossip spy \ - --entrypoint 127.0.0.1:8001 \ - --num-nodes-exactly $nodeCount \ - --timeout 30 - - # Wait for it to make a snapshot root - SECONDS= - while [[ ! 
-d $ledger/snapshot ]]; do - sleep 5 - if [[ $SECONDS -gt 60 ]]; then - echo "Error: validator failed to create a snapshot" - exit 1 - fi - done - ) - echo Ok -done - -# Terminate all the validators -killSession - -echo -echo Pass -exit 0 diff --git a/system-test/abi-testcases/mixed-validator-test.yml b/system-test/abi-testcases/mixed-validator-test.yml deleted file mode 100644 index e9345ebd08dc57..00000000000000 --- a/system-test/abi-testcases/mixed-validator-test.yml +++ /dev/null @@ -1,5 +0,0 @@ -steps: - - command: "system-test/abi-testcases/mixed-validator-test.sh" - label: "Mixed Validator Test" - artifact_paths: - - "system-test/abi-testcases/logs/*" diff --git a/system-test/automation_utils.sh b/system-test/automation_utils.sh deleted file mode 100755 index 3211ab978e5fce..00000000000000 --- a/system-test/automation_utils.sh +++ /dev/null @@ -1,353 +0,0 @@ -#!/usr/bin/env bash - -# | source | this file -# shellcheck disable=SC1090 -# shellcheck disable=SC1091 -# shellcheck disable=SC2034 - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -REPO_ROOT=${DIR}/.. 
- -source "${REPO_ROOT}"/ci/upload-ci-artifact.sh - -function execution_step { - # shellcheck disable=SC2124 - STEP="$@" - echo --- "${STEP[@]}" -} - -function collect_logs { - execution_step "Collect logs from remote nodes" - rm -rf "${REPO_ROOT}"/net/log - "${REPO_ROOT}"/net/net.sh logs - for logfile in "${REPO_ROOT}"/net/log/*; do - ( - upload-ci-artifact "$logfile" - ) - done -} - -function analyze_packet_loss { - ( - set -x - # shellcheck disable=SC1091 - source "${REPO_ROOT}"/net/config/config - mkdir -p iftop-logs - execution_step "Map private -> public IP addresses in iftop logs" - # shellcheck disable=SC2154 - for i in "${!validatorIpList[@]}"; do - # shellcheck disable=SC2154 - # shellcheck disable=SC2086 - # shellcheck disable=SC2027 - echo "{\"private\": \""${validatorIpListPrivate[$i]}""\", \"public\": \""${validatorIpList[$i]}""\"}," - done > ip_address_map.txt - - for ip in "${validatorIpList[@]}"; do - "${REPO_ROOT}"/net/scp.sh ip_address_map.txt solana@"$ip":~/solana/ - done - - execution_step "Remotely post-process iftop logs" - # shellcheck disable=SC2154 - for ip in "${validatorIpList[@]}"; do - iftop_log=iftop-logs/$ip-iftop.log - # shellcheck disable=SC2016 - "${REPO_ROOT}"/net/ssh.sh solana@"$ip" 'PATH=$PATH:~/.cargo/bin/ ~/solana/scripts/iftop-postprocess.sh ~/solana/iftop.log temp.log ~solana/solana/ip_address_map.txt' > "$iftop_log" - upload-ci-artifact "$iftop_log" - done - - execution_step "Analyzing Packet Loss" - "${REPO_ROOT}"/solana-release/bin/solana-log-analyzer analyze -f ./iftop-logs/ | sort -k 2 -g - ) -} - -function wait_for_max_stake { - max_stake="$1" - if [[ $max_stake -eq 100 ]]; then - return - fi - - source "${REPO_ROOT}"/net/common.sh - loadConfigFile - - # shellcheck disable=SC2154 - # shellcheck disable=SC2029 - ssh "${sshOptions[@]}" "${validatorIpList[0]}" "RUST_LOG=info \$HOME/.cargo/bin/solana wait-for-max-stake $max_stake --url http://127.0.0.1:8899" -} - -function wait_for_equal_stake { - source 
"${REPO_ROOT}"/net/common.sh - loadConfigFile - - max_stake=$((100 / ${#validatorIpList[@]} + 1)) - execution_step "Waiting for max stake to fall below ${max_stake}%" - - wait_for_max_stake $max_stake -} - -function get_slot { - source "${REPO_ROOT}"/net/common.sh - loadConfigFile - ssh "${sshOptions[@]}" "${validatorIpList[0]}" '$HOME/.cargo/bin/solana --url http://127.0.0.1:8899 slot' -} - -function get_bootstrap_validator_ip_address { - source "${REPO_ROOT}"/net/common.sh - loadConfigFile - echo "${validatorIpList[0]}" -} - -function get_active_stake { - source "${REPO_ROOT}"/net/common.sh - loadConfigFile - ssh "${sshOptions[@]}" "${validatorIpList[0]}" \ - '$HOME/.cargo/bin/solana --url http://127.0.0.1:8899 validators --output=json | grep -o "totalActiveStake\": [0-9]*" | cut -d: -f2' -} - -function get_current_stake { - source "${REPO_ROOT}"/net/common.sh - loadConfigFile - ssh "${sshOptions[@]}" "${validatorIpList[0]}" \ - '$HOME/.cargo/bin/solana --url http://127.0.0.1:8899 validators --output=json | grep -o "totalCurrentStake\": [0-9]*" | cut -d: -f2' -} - -function get_validator_confirmation_time { - SINCE=$1 - declare q_mean_confirmation=' - SELECT ROUND(MEAN("duration_ms")) as "mean_confirmation_ms" - FROM "'$TESTNET_TAG'"."autogen"."validator-confirmation" - WHERE time > now() - '"$SINCE"'s' - - mean_confirmation_ms=$( \ - curl -G "${INFLUX_HOST}/query?u=ro&p=topsecret" \ - --data-urlencode "db=${TESTNET_TAG}" \ - --data-urlencode "q=$q_mean_confirmation" | - python3 "${REPO_ROOT}"/system-test/testnet-automation-json-parser.py --empty_error | - cut -d' ' -f2) -} - -function collect_performance_statistics { - execution_step "Collect performance statistics about run" - # total_transactions will be 0 when the node is leader, so exclude those - declare q_mean_tps=' - SELECT ROUND(MEAN("median_sum")) as "mean_tps" FROM ( - SELECT MEDIAN(sum_total_transactions) AS "median_sum" FROM ( - SELECT SUM("total_transactions") AS "sum_total_transactions" - FROM 
"'$TESTNET_TAG'"."autogen"."replay-slot-stats" - WHERE time > now() - '"$TEST_DURATION_SECONDS"'s AND total_transactions > 0 - GROUP BY time(1s), host_id) - GROUP BY time(1s) - )' - - declare q_max_tps=' - SELECT MAX("median_sum") as "max_tps" FROM ( - SELECT MEDIAN(sum_total_transactions) AS "median_sum" FROM ( - SELECT SUM("total_transactions") AS "sum_total_transactions" - FROM "'$TESTNET_TAG'"."autogen"."replay-slot-stats" - WHERE time > now() - '"$TEST_DURATION_SECONDS"'s AND total_transactions > 0 - GROUP BY time(1s), host_id) - GROUP BY time(1s) - )' - - declare q_mean_confirmation=' - SELECT round(mean("duration_ms")) as "mean_confirmation_ms" - FROM "'$TESTNET_TAG'"."autogen"."validator-confirmation" - WHERE time > now() - '"$TEST_DURATION_SECONDS"'s' - - declare q_max_confirmation=' - SELECT round(max("duration_ms")) as "max_confirmation_ms" - FROM "'$TESTNET_TAG'"."autogen"."validator-confirmation" - WHERE time > now() - '"$TEST_DURATION_SECONDS"'s' - - declare q_99th_confirmation=' - SELECT round(percentile("duration_ms", 99)) as "99th_percentile_confirmation_ms" - FROM "'$TESTNET_TAG'"."autogen"."validator-confirmation" - WHERE time > now() - '"$TEST_DURATION_SECONDS"'s' - - declare q_max_tower_distance_observed=' - SELECT MAX("tower_distance") as "max_tower_distance" FROM ( - SELECT last("slot") - last("root") as "tower_distance" - FROM "'$TESTNET_TAG'"."autogen"."tower-observed" - WHERE time > now() - '"$TEST_DURATION_SECONDS"'s - GROUP BY time(1s), host_id)' - - declare q_last_tower_distance_observed=' - SELECT MEAN("tower_distance") as "last_tower_distance" FROM ( - SELECT last("slot") - last("root") as "tower_distance" - FROM "'$TESTNET_TAG'"."autogen"."tower-observed" - GROUP BY host_id)' - - curl -G "${INFLUX_HOST}/query?u=ro&p=topsecret" \ - --data-urlencode "db=${TESTNET_TAG}" \ - --data-urlencode 
"q=$q_mean_tps;$q_max_tps;$q_mean_confirmation;$q_max_confirmation;$q_99th_confirmation;$q_max_tower_distance_observed;$q_last_tower_distance_observed" | - python3 "${REPO_ROOT}"/system-test/testnet-automation-json-parser.py >>"$RESULT_FILE" - - declare q_dropped_vote_hash_count=' - SELECT sum("count") as "sum_dropped_vote_hash" - FROM "'$TESTNET_TAG'"."autogen"."dropped-vote-hash" - WHERE time > now() - '"$TEST_DURATION_SECONDS"'s' - - # store in variable to be returned - dropped_vote_hash_count=$( \ - curl -G "${INFLUX_HOST}/query?u=ro&p=topsecret" \ - --data-urlencode "db=${TESTNET_TAG}" \ - --data-urlencode "q=$q_dropped_vote_hash_count" | - python3 "${REPO_ROOT}"/system-test/testnet-automation-json-parser-missing.py) -} - -function upload_results_to_slack() { - echo --- Uploading results to Slack Performance Results App - - if [[ -z $SLACK_WEBHOOK_URL ]] ; then - echo "SLACK_WEBHOOOK_URL undefined" - exit 1 - fi - - [[ -n $BUILDKITE_MESSAGE ]] || BUILDKITE_MESSAGE="Message not defined" - - COMMIT=$(git rev-parse HEAD) - COMMIT_BUTTON_TEXT="$(echo "$COMMIT" | head -c 8)" - COMMIT_URL="https://github.com/solana-labs/solana/commit/${COMMIT}" - - if [[ -n $BUILDKITE_BUILD_URL ]] ; then - BUILD_BUTTON_TEXT="Build Kite Job" - else - BUILD_BUTTON_TEXT="Build URL not defined" - BUILDKITE_BUILD_URL="https://buildkite.com/solana-labs/" - fi - - GRAFANA_URL="https://internal-metrics.solana.com:3000/d/monitor-${CHANNEL:-edge}/cluster-telemetry-${CHANNEL:-edge}?var-testnet=${TESTNET_TAG:-testnet-automation}&from=${TESTNET_START_UNIX_MSECS:-0}&to=${TESTNET_FINISH_UNIX_MSECS:-0}" - - [[ -n $RESULT_DETAILS ]] || RESULT_DETAILS="Undefined" - [[ -n $TEST_CONFIGURATION ]] || TEST_CONFIGURATION="Undefined" - - payLoad="$(cat <) | [$BUILD_BUTTON_TEXT](<$BUILDKITE_BUILD_URL>) | [Grafana](<$GRAFANA_URL>)\n\ -Test Configuration:\n\ -\`\`\`$TEST_CONFIGURATION\`\`\`\n\ -Result Details:\n\ -\`\`\`$SANITIZED_RESULT\`\`\`\n\ -" -} -EOF -} - -function 
get_net_launch_software_version_launch_args() { - declare channel="${1?}" - declare artifact_basename="${2?}" - declare return_varname="${3:?}" - if [[ -n $channel ]]; then - eval "$return_varname=-t\ \$channel" - else - execution_step "Downloading tar from build artifacts (${artifact_basename})" - buildkite-agent artifact download "${artifact_basename}*.tar.bz2" . - eval "$return_varname=-T\ \${artifact_basename}*.tar.bz2" - fi -} diff --git a/system-test/deprecated-testcases/colo-cpu-only-perf.yml b/system-test/deprecated-testcases/colo-cpu-only-perf.yml deleted file mode 100755 index a79560ddf8e7e9..00000000000000 --- a/system-test/deprecated-testcases/colo-cpu-only-perf.yml +++ /dev/null @@ -1,17 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "COLO performance testnet CPU only" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - ENABLE_GPU: "false" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 4 - NUMBER_OF_CLIENT_NODES: 2 - CLIENT_OPTIONS: "bench-tps=2=--tx_count 20000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "colo-deploy" diff --git a/system-test/deprecated-testcases/colo-gpu-perf-high-txcount.yml b/system-test/deprecated-testcases/colo-gpu-perf-high-txcount.yml deleted file mode 100755 index 1ff020f4ea7623..00000000000000 --- a/system-test/deprecated-testcases/colo-gpu-perf-high-txcount.yml +++ /dev/null @@ -1,17 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "COLO performance testnet GPU enabled - High Tx Count" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-gpu-enabled" - ENABLE_GPU: "true" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 4 - NUMBER_OF_CLIENT_NODES: 2 - CLIENT_OPTIONS: "bench-tps=2=--tx_count 30000 --thread-batch-sleep-ms 250" - 
ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "colo-deploy" diff --git a/system-test/deprecated-testcases/colo-gpu-perf.yml b/system-test/deprecated-testcases/colo-gpu-perf.yml deleted file mode 100755 index b7ffc178962159..00000000000000 --- a/system-test/deprecated-testcases/colo-gpu-perf.yml +++ /dev/null @@ -1,17 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "COLO performance testnet GPU enabled" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-gpu-enabled" - ENABLE_GPU: "true" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 4 - NUMBER_OF_CLIENT_NODES: 2 - CLIENT_OPTIONS: "bench-tps=2=--tx_count 20000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "colo-deploy" diff --git a/system-test/deprecated-testcases/gce-gpu-perf-100-node.yml b/system-test/deprecated-testcases/gce-gpu-perf-100-node.yml deleted file mode 100755 index 6c3c22aef3e3d6..00000000000000 --- a/system-test/deprecated-testcases/gce-gpu-perf-100-node.yml +++ /dev/null @@ -1,21 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - GPU Enabled 100 Nodes" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-gpu-enabled" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 100 - ENABLE_GPU: "true" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16 --accelerator count=2,type=nvidia-tesla-v100" - NUMBER_OF_CLIENT_NODES: 2 - CLIENT_OPTIONS: "bench-tps=2=--tx_count 15000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a,us-west1-b,us-central1-a,europe-west4-a" - ALLOW_BOOT_FAILURES: "true" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/genesis-test/cluster_token_count.sh 
b/system-test/genesis-test/cluster_token_count.sh deleted file mode 100755 index 0d0fd2507ee60c..00000000000000 --- a/system-test/genesis-test/cluster_token_count.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env bash - -# shellcheck disable=SC1090 -# shellcheck disable=SC1091 -source "$(dirname "$0")"/get_program_accounts.sh - -usage() { - exitcode=0 - if [[ -n "$1" ]]; then - exitcode=1 - echo "Error: $*" - fi - cat <> "$csvfile" - fi -} - -function display_results_summary { - stake_account_balance_total=0 - num_stake_accounts=0 - { - read -r - while IFS=, read -r program account_pubkey lamports lockup_epoch; do - case $program in - SYSTEM) - system_account_balance=$lamports - ;; - STAKE) - stake_account_balance_total=$((stake_account_balance_total + lamports)) - num_stake_accounts=$((num_stake_accounts + 1)) - ;; - *) - echo "Unknown program: $program" - exit 1 - ;; - esac - done - } < "$results_file" - - stake_account_balance_total_sol="$(bc <<< "scale=3; $stake_account_balance_total/$LAMPORTS_PER_SOL")" - system_account_balance_sol="$(bc <<< "scale=3; $system_account_balance/$LAMPORTS_PER_SOL")" - - all_account_total_balance="$(bc <<< "scale=3; $system_account_balance+$stake_account_balance_total")" - all_account_total_balance_sol="$(bc <<< "scale=3; ($system_account_balance+$stake_account_balance_total)/$LAMPORTS_PER_SOL")" - - echo "--------------------------------------------------------------------------------------" - echo "Results written to: $results_file" - echo "--------------------------------------------------------------------------------------" - echo "Summary of accounts owned by $filter_pubkey" - echo "" - printf "Number of STAKE accounts: %'d\n" "$num_stake_accounts" - printf "Balance of all STAKE accounts: %'d lamports\n" "$stake_account_balance_total" - printf "Balance of all STAKE accounts: %'.3f SOL\n" "$stake_account_balance_total_sol" - printf "\n" - printf "Balance of SYSTEM account: %'d lamports\n" "$system_account_balance" - printf 
"Balance of SYSTEM account: %'.3f SOL\n" "$system_account_balance_sol" - printf "\n" - printf "Total Balance of ALL accounts: %'d lamports\n" "$all_account_total_balance" - printf "Total Balance of ALL accounts: %'.3f SOL\n" "$all_account_total_balance_sol" - echo "--------------------------------------------------------------------------------------" -} - -function display_results_details { - # shellcheck disable=SC2002 - cat "$results_file" | column -t -s, -} - -LAMPORTS_PER_SOL=1000000000 # 1 billion -all_stake_accounts_json_file=all_stake_accounts_data.json -all_stake_accounts_csv_file=all_stake_accounts_data.csv - -url=$1 -[[ -n $url ]] || usage "Missing required RPC URL" -shift -filter_pubkey=$1 -[[ -n $filter_pubkey ]] || usage "Missing required pubkey" -shift - -results_file=accounts_owned_by_${filter_pubkey}.csv -system_account_json_file=system_account_${filter_pubkey}.json - -echo "Program,Account_Pubkey,Lamports,Lockup_Epoch" > "$results_file" - -echo "Getting system account data" -get_account_info "$filter_pubkey" "$url" "$system_account_json_file" -# shellcheck disable=SC2002 -system_account_balance="$(cat "$system_account_json_file" | jq -r '(.result | .value | .lamports)')" -if [[ "$system_account_balance" == "null" ]]; then - echo "The provided pubkey is not found in the system program: $filter_pubkey" - exit 1 -fi -echo SYSTEM,"$filter_pubkey","$system_account_balance",N/A >> "$results_file" - -echo "Getting all stake program accounts" -get_program_accounts STAKE "$STAKE_PROGRAM_PUBKEY" "$url" "$all_stake_accounts_json_file" -write_program_account_data_csv STAKE "$all_stake_accounts_json_file" "$all_stake_accounts_csv_file" - -echo "Querying cluster at $url for stake accounts with authorized staker: $filter_pubkey" -last_tick=$SECONDS -{ -read -r -while IFS=, read -r account_pubkey lamports; do - parse_stake_account_data_to_file "$account_pubkey" "$filter_pubkey" "$results_file" & - sleep 0.01 - if [[ $((SECONDS - last_tick)) == 1 ]]; then - 
last_tick=$SECONDS - printf "." - fi -done -} < "$all_stake_accounts_csv_file" -wait -printf "\n" - -display_results_details -display_results_summary diff --git a/system-test/genesis-test/get_program_accounts.sh b/system-test/genesis-test/get_program_accounts.sh deleted file mode 100755 index 60262a55eab51c..00000000000000 --- a/system-test/genesis-test/get_program_accounts.sh +++ /dev/null @@ -1,57 +0,0 @@ -# | source | this file - -# shellcheck disable=SC2034 -# shellcheck disable=SC2086 - -STAKE_PROGRAM_PUBKEY=Stake11111111111111111111111111111111111111 -SYSTEM_PROGRAM_PUBKEY=11111111111111111111111111111111 -VOTE_PROGRAM_PUBKEY=Vote111111111111111111111111111111111111111 -CONFIG_PROGRAM_PUBKEY=Config1111111111111111111111111111111111111 - -function get_program_accounts { - PROGRAM_NAME="$1" - PROGRAM_PUBKEY="$2" - URL="$3" - - if [[ -n "$4" ]] ; then - JSON_OUTFILE="$4" - else - JSON_OUTFILE="${PROGRAM_NAME}_account_data.json" - fi - curl -s -X POST -H "Content-Type: application/json" -d \ - '{"jsonrpc":"2.0","id":1, "method":"getProgramAccounts", "params":["'$PROGRAM_PUBKEY'"]}' $URL | jq '.' \ - > $JSON_OUTFILE -} - -function write_program_account_data_csv { - PROGRAM_NAME="$1" - if [[ -n "$2" ]] ; then - JSON_INFILE="$2" - else - JSON_INFILE="${PROGRAM_NAME}_account_data.json" - fi - if [[ -n "$3" ]] ; then - CSV_OUTFILE="$3" - else - CSV_OUTFILE="${PROGRAM_NAME}_account_data.csv" - fi - - echo "Account_Pubkey,Lamports" > $CSV_OUTFILE - # shellcheck disable=SC2002 - cat "$JSON_INFILE" | jq -r '(.result | .[]) | [.pubkey, (.account | .lamports)] | @csv' \ - >> $CSV_OUTFILE -} - -function get_account_info { - ACCOUNT_PUBKEY="$1" - URL="$2" - - if [[ -n "$3" ]] ; then - JSON_OUTFILE="$3" - else - JSON_OUTFILE="${ACCOUNT_PUBKEY}_account_info.json" - fi - curl -s -X POST -H "Content-Type: application/json" -d \ - '{"jsonrpc":"2.0","id":1, "method":"getAccountInfo", "params":["'$ACCOUNT_PUBKEY'"]}' $URL | jq '.' 
\ - > $JSON_OUTFILE -} diff --git a/system-test/netem-configs/complete-loss-four-partitions b/system-test/netem-configs/complete-loss-four-partitions deleted file mode 100644 index dc3407fc7c76c9..00000000000000 --- a/system-test/netem-configs/complete-loss-four-partitions +++ /dev/null @@ -1,70 +0,0 @@ -{ - "partitions":[ - 25, - 25, - 25, - 25 - ], - "interconnects":[ - { - "a":0, - "b":1, - "config":"loss 100%" - }, - { - "a":1, - "b":0, - "config":"loss 100%" - }, - { - "a":0, - "b":2, - "config":"loss 100%" - }, - { - "a":2, - "b":0, - "config":"loss 100%" - }, - { - "a":0, - "b":3, - "config":"loss 100%" - }, - { - "a":3, - "b":0, - "config":"loss 100%" - }, - { - "a":1, - "b":2, - "config":"loss 100%" - }, - { - "a":2, - "b":1, - "config":"loss 100%" - }, - { - "a":1, - "b":3, - "config":"loss 100%" - }, - { - "a":3, - "b":1, - "config":"loss 100%" - }, - { - "a":2, - "b":3, - "config":"loss 100%" - }, - { - "a":3, - "b":2, - "config":"loss 100%" - } - ] -} \ No newline at end of file diff --git a/system-test/netem-configs/complete-loss-two-partitions b/system-test/netem-configs/complete-loss-two-partitions deleted file mode 100644 index 0302a8f42e2a90..00000000000000 --- a/system-test/netem-configs/complete-loss-two-partitions +++ /dev/null @@ -1,18 +0,0 @@ -{ - "partitions":[ - 50, - 50 - ], - "interconnects":[ - { - "a":0, - "b":1, - "config":"loss 100%" - }, - { - "a":1, - "b":0, - "config":"loss 100%" - } - ] -} \ No newline at end of file diff --git a/system-test/netem-configs/partial-loss-three-partitions b/system-test/netem-configs/partial-loss-three-partitions deleted file mode 100644 index 3774ca1f2ea1c1..00000000000000 --- a/system-test/netem-configs/partial-loss-three-partitions +++ /dev/null @@ -1,39 +0,0 @@ -{ - "partitions":[ - 34, - 33, - 33 - ], - "interconnects":[ - { - "a":0, - "b":1, - "config":"loss 15% delay 25ms" - }, - { - "a":1, - "b":0, - "config":"loss 15% delay 25ms" - }, - { - "a":0, - "b":2, - "config":"loss 10% delay 15ms" - }, 
- { - "a":2, - "b":0, - "config":"loss 10% delay 15ms" - }, - { - "a":2, - "b":1, - "config":"loss 5% delay 5ms" - }, - { - "a":1, - "b":2, - "config":"loss 5% delay 5ms" - } - ] -} \ No newline at end of file diff --git a/system-test/partition-testcases/colo-3-partition.yml b/system-test/partition-testcases/colo-3-partition.yml deleted file mode 100755 index 2255ebb432822d..00000000000000 --- a/system-test/partition-testcases/colo-3-partition.yml +++ /dev/null @@ -1,21 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Colo - CPU Only - Partial Loss 3 Partitions" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 2 - ENABLE_GPU: "false" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=2=--tx_count 5000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: "system-test/netem-configs/partial-loss-three-partitions" - PARTITION_ACTIVE_DURATION: 30 - PARTITION_INACTIVE_DURATION: 30 - PARTITION_ITERATION_COUNT: 5 - TEST_TYPE: "partition" - agents: - queue: "colo-deploy" diff --git a/system-test/partition-testcases/colo-partition-2-1-test.yml b/system-test/partition-testcases/colo-partition-2-1-test.yml deleted file mode 100644 index ae875d92a83cef..00000000000000 --- a/system-test/partition-testcases/colo-partition-2-1-test.yml +++ /dev/null @@ -1,21 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Colo - CPU Only - Complete Loss 2 - 1 Partition" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 2 - ENABLE_GPU: "false" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 5000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: 
"system-test/netem-configs/complete-loss-two-partitions" - PARTITION_ACTIVE_DURATION: 60 - PARTITION_INACTIVE_DURATION: 60 - PARTITION_ITERATION_COUNT: 10 - TEST_TYPE: "partition" - agents: - queue: "colo-deploy" diff --git a/system-test/partition-testcases/colo-partition-long-sanity-test.yml b/system-test/partition-testcases/colo-partition-long-sanity-test.yml deleted file mode 100755 index dbf82694c9845f..00000000000000 --- a/system-test/partition-testcases/colo-partition-long-sanity-test.yml +++ /dev/null @@ -1,21 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Colo - CPU Only - Partial Loss 3 Partitions" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 2 - ENABLE_GPU: "false" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 5000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: "system-test/netem-configs/partial-loss-three-partitions" - PARTITION_ACTIVE_DURATION: 60 - PARTITION_INACTIVE_DURATION: 60 - PARTITION_ITERATION_COUNT: 10 - TEST_TYPE: "partition" - agents: - queue: "colo-deploy" diff --git a/system-test/partition-testcases/colo-partition-no-superminority-test.yml b/system-test/partition-testcases/colo-partition-no-superminority-test.yml deleted file mode 100644 index a3ccb0ac7fd684..00000000000000 --- a/system-test/partition-testcases/colo-partition-no-superminority-test.yml +++ /dev/null @@ -1,22 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Colo - CPU Only - Complete Loss 4 Partitions" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 2 - ENABLE_GPU: "false" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 5000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - 
APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: "system-test/netem-configs/complete-loss-four-partitions" - PARTITION_ACTIVE_DURATION: 60 - PARTITION_INACTIVE_DURATION: 60 - PARTITION_ITERATION_COUNT: 10 - BOOTSTRAP_VALIDATOR_MAX_STAKE_THRESHOLD: 33 - TEST_TYPE: "partition" - agents: - queue: "colo-deploy" diff --git a/system-test/partition-testcases/colo-partition-once-then-stabilize.yml b/system-test/partition-testcases/colo-partition-once-then-stabilize.yml deleted file mode 100755 index 0e187bcc8cc97e..00000000000000 --- a/system-test/partition-testcases/colo-partition-once-then-stabilize.yml +++ /dev/null @@ -1,21 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Colo - CPU Only - 1 minute partition then 5 minute stabilization" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 2 - ENABLE_GPU: "false" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 5000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: "system-test/netem-configs/partial-loss-three-partitions" - PARTITION_ACTIVE_DURATION: 60 - PARTITION_INACTIVE_DURATION: 300 - PARTITION_ITERATION_COUNT: 1 - TEST_TYPE: "partition" - agents: - queue: "colo-deploy" diff --git a/system-test/partition-testcases/gce-5-node-3-partition.yml b/system-test/partition-testcases/gce-5-node-3-partition.yml deleted file mode 100755 index 62dbc3a5f62d5d..00000000000000 --- a/system-test/partition-testcases/gce-5-node-3-partition.yml +++ /dev/null @@ -1,24 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - CPU Only 5 Node - Partial Loss 3 Partitions" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type 
n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: "system-test/netem-configs/partial-loss-three-partitions" - PARTITION_ACTIVE_DURATION: 30 - PARTITION_INACTIVE_DURATION: 30 - PARTITION_ITERATION_COUNT: 5 - TEST_TYPE: "partition" - agents: - queue: "gce-deploy" diff --git a/system-test/partition-testcases/gce-5-node-single-region-2-partitions.yml b/system-test/partition-testcases/gce-5-node-single-region-2-partitions.yml deleted file mode 100755 index 75bbf576d725fd..00000000000000 --- a/system-test/partition-testcases/gce-5-node-single-region-2-partitions.yml +++ /dev/null @@ -1,24 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - 2 even partitions with full loss" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 5000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: "system-test/netem-configs/complete-loss-two-partitions" - PARTITION_ACTIVE_DURATION: 60 - PARTITION_INACTIVE_DURATION: 300 - PARTITION_ITERATION_COUNT: 5 - TEST_TYPE: "partition" - agents: - queue: "gce-deploy" diff --git a/system-test/partition-testcases/gce-partition-once-then-stabilize.yml b/system-test/partition-testcases/gce-partition-once-then-stabilize.yml deleted file mode 100755 index a0f197cbe3d3f1..00000000000000 --- a/system-test/partition-testcases/gce-partition-once-then-stabilize.yml +++ /dev/null @@ -1,24 +0,0 @@ -steps: - - command: 
"system-test/testnet-automation.sh" - label: "GCE - CPU Only - 1 minute partition then 5 minute stabilization" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: "system-test/netem-configs/partial-loss-three-partitions" - PARTITION_ACTIVE_DURATION: 60 - PARTITION_INACTIVE_DURATION: 300 - PARTITION_ITERATION_COUNT: 1 - TEST_TYPE: "partition" - agents: - queue: "gce-deploy" diff --git a/system-test/partition-testcases/gce-partition-recovery.yml b/system-test/partition-testcases/gce-partition-recovery.yml deleted file mode 100755 index a2b2b4fd94db2b..00000000000000 --- a/system-test/partition-testcases/gce-partition-recovery.yml +++ /dev/null @@ -1,23 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Partition recovery on GCE" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - ENABLE_GPU: "false" - NUMBER_OF_VALIDATOR_NODES: 9 - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - ADDITIONAL_FLAGS: "--dedicated" - SKIP_PERF_RESULTS: "true" - EXTRA_PRIMORDIAL_STAKES: 4 - TEST_TYPE: "script" - WARMUP_SLOTS_BEFORE_TEST: 400 - PRE_PARTITION_DURATION: 120 - PARTITION_DURATION: 360 - PARTITION_INCREMENT: 60 - NETEM_CONFIG_FILE: "system-test/netem-configs/complete-loss-two-partitions" - CUSTOM_SCRIPT: "system-test/partition-testcases/measure-partition-recovery.sh" - agents: - queue: "gce-deploy" diff --git a/system-test/partition-testcases/gce-partition-with-offline.yml 
b/system-test/partition-testcases/gce-partition-with-offline.yml deleted file mode 100755 index 896d553d00eefd..00000000000000 --- a/system-test/partition-testcases/gce-partition-with-offline.yml +++ /dev/null @@ -1,28 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - CPU Only 5 Node - 20% network offline with 2 partitions" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 4 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: "system-test/netem-configs/complete-loss-two-partitions" - PARTITION_ACTIVE_DURATION: 30 - PARTITION_INACTIVE_DURATION: 30 - PARTITION_ITERATION_COUNT: 5 - TEST_TYPE: "partition" - EXTRA_PRIMORDIAL_STAKES: 4 - WAIT_FOR_EQUAL_STAKE: "true" - WARMUP_SLOTS_BEFORE_TEST: 400 - NUMBER_OF_OFFLINE_NODES: 1 - agents: - queue: "gce-deploy" diff --git a/system-test/partition-testcases/measure-partition-recovery.sh b/system-test/partition-testcases/measure-partition-recovery.sh deleted file mode 100755 index 3c1df03bdef9c2..00000000000000 --- a/system-test/partition-testcases/measure-partition-recovery.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -# shellcheck disable=SC1090 -# shellcheck disable=SC1091 -source "$(dirname "$0")"/../automation_utils.sh - -RESULT_FILE="$1" - -[[ -n $TESTNET_TAG ]] || TESTNET_TAG=${CLOUD_PROVIDER}-testnet-automation - -if [[ -z $NETEM_CONFIG_FILE ]]; then - echo "Error: For this test NETEM_CONFIG_FILE must be specified" - exit 1 -fi - -if [[ -z $PRE_PARTITION_DURATION ]]; then - PRE_PARTITION_DURATION=60 -fi - -if [[ -z $PARTITION_DURATION ]]; then - PARTITION_DURATION=300 -fi - 
-if [[ -z $PARTITION_INCREMENT ]]; then - PARTITION_INCREMENT=60 -fi - -num_online_nodes=$(( NUMBER_OF_VALIDATOR_NODES + 1 )) -if [[ -n "$NUMBER_OF_OFFLINE_NODES" ]]; then - num_online_nodes=$(( num_online_nodes - NUMBER_OF_OFFLINE_NODES )) -fi - -execution_step "Measuring validator confirmation time for $PRE_PARTITION_DURATION seconds" -sleep "$PRE_PARTITION_DURATION" -get_validator_confirmation_time "$PRE_PARTITION_DURATION" -# shellcheck disable=SC2154 -execution_step "Pre partition validator confirmation time is $mean_confirmation_ms ms" -echo "Pre partition validator confirmation time: $mean_confirmation_ms ms" >> "$RESULT_FILE" -target=$mean_confirmation_ms - -while true; do - execution_step "Applying partition config $NETEM_CONFIG_FILE for $PARTITION_DURATION seconds" - echo "Partitioning for $PARTITION_DURATION seconds" >> "$RESULT_FILE" - "${REPO_ROOT}"/net/net.sh netem --config-file "$NETEM_CONFIG_FILE" -n $num_online_nodes - sleep "$PARTITION_DURATION" - - execution_step "Resolving partition" - "${REPO_ROOT}"/net/net.sh netem --config-file "$NETEM_CONFIG_FILE" --netem-cmd cleanup -n $num_online_nodes - - get_validator_confirmation_time 10 - SECONDS=0 - - # This happens when we haven't confirmed anything recently so the query returns an empty string - while [[ -z $mean_confirmation_ms ]]; do - sleep 5 - get_validator_confirmation_time 10 - if [[ $SECONDS -gt $PARTITION_DURATION ]]; then - echo " No confirmations seen after $SECONDS seconds" >> "$RESULT_FILE" - exit 0 - fi - done - echo " Validator confirmation is $mean_confirmation_ms ms $SECONDS seconds after resolving the partition" >> "$RESULT_FILE" - - last="" - while [[ -z $mean_confirmation_ms || $mean_confirmation_ms -gt $target ]]; do - sleep 5 - - if [[ -n $mean_confirmation_ms && -n $last && $mean_confirmation_ms -gt $(echo "$last * 1.2" | bc) || $SECONDS -gt $PARTITION_DURATION ]]; then - echo " Unable to make progress after $SECONDS seconds. 
Last confirmation time was $mean_confirmation_ms ms" >> "$RESULT_FILE" - exit 0 - fi - last=$mean_confirmation_ms - get_validator_confirmation_time 10 - done - - echo " Recovered in $SECONDS seconds: validator confirmation to fall to $mean_confirmation_ms ms" >> "$RESULT_FILE" - - PARTITION_DURATION=$(( PARTITION_DURATION + PARTITION_INCREMENT )) -done diff --git a/system-test/performance-testcases/aws-cpu-only-perf-10-node.yml b/system-test/performance-testcases/aws-cpu-only-perf-10-node.yml deleted file mode 100755 index bca50aa6c697f6..00000000000000 --- a/system-test/performance-testcases/aws-cpu-only-perf-10-node.yml +++ /dev/null @@ -1,21 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "AWS - CPU Only 10 Node" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "ec2" - TESTNET_TAG: "aws-perf-cpu-only" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 10 - ENABLE_GPU: "false" - # Up to 3.1 GHz Intel Xeon® Platinum 8175, 16 vCPU, 64GB RAM - VALIDATOR_NODE_MACHINE_TYPE: "m5.4xlarge" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west-1a,us-west-1c,us-east-1a,eu-west-1a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "aws-deploy" diff --git a/system-test/performance-testcases/aws-cpu-only-perf-5-node.yml b/system-test/performance-testcases/aws-cpu-only-perf-5-node.yml deleted file mode 100755 index cc9ac48f4356f8..00000000000000 --- a/system-test/performance-testcases/aws-cpu-only-perf-5-node.yml +++ /dev/null @@ -1,21 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "AWS - CPU Only 5 Node" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "ec2" - TESTNET_TAG: "aws-perf-cpu-only" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "false" - # Up to 3.1 GHz 
Intel Xeon® Platinum 8175, 16 vCPU, 64GB RAM - VALIDATOR_NODE_MACHINE_TYPE: "m5.4xlarge" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west-1a,us-west-1c,us-east-1a,eu-west-1a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "aws-deploy" diff --git a/system-test/performance-testcases/azure-cpu-only-perf-5-node.yml b/system-test/performance-testcases/azure-cpu-only-perf-5-node.yml deleted file mode 100755 index a8fc529cef32bb..00000000000000 --- a/system-test/performance-testcases/azure-cpu-only-perf-5-node.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Azure - CPU Only 5 Node" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "azure" - TESTNET_TAG: "azure-perf-cpu-only" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "Standard_D16s_v3" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "westus" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "azure-deploy" diff --git a/system-test/performance-testcases/colo-cpu-only-perf-4-val-1-client.yml b/system-test/performance-testcases/colo-cpu-only-perf-4-val-1-client.yml deleted file mode 100755 index 8be728fb98b84f..00000000000000 --- a/system-test/performance-testcases/colo-cpu-only-perf-4-val-1-client.yml +++ /dev/null @@ -1,17 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "COLO performance testnet CPU only (reduced validator and client count)" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - ENABLE_GPU: "false" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 2 - 
NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "colo-deploy" diff --git a/system-test/performance-testcases/colo-gpu-perf-4-val-1-client.yml b/system-test/performance-testcases/colo-gpu-perf-4-val-1-client.yml deleted file mode 100755 index 196e8dc9bc74f9..00000000000000 --- a/system-test/performance-testcases/colo-gpu-perf-4-val-1-client.yml +++ /dev/null @@ -1,17 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "COLO performance testnet GPU enabled (reduced validator and client count)" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-gpu-enabled" - ENABLE_GPU: "true" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 2 - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "colo-deploy" diff --git a/system-test/performance-testcases/colo-gpu-perf-high-txcount-4-val-1-client.yml b/system-test/performance-testcases/colo-gpu-perf-high-txcount-4-val-1-client.yml deleted file mode 100755 index 933068a463f083..00000000000000 --- a/system-test/performance-testcases/colo-gpu-perf-high-txcount-4-val-1-client.yml +++ /dev/null @@ -1,17 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "COLO performance testnet GPU enabled - High Tx Count (reduced validator and client count)" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-gpu-enabled" - ENABLE_GPU: "true" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 2 - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 20000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "colo-deploy" diff --git 
a/system-test/performance-testcases/gce-cpu-only-perf-10-node.yml b/system-test/performance-testcases/gce-cpu-only-perf-10-node.yml deleted file mode 100755 index c8d3677164065b..00000000000000 --- a/system-test/performance-testcases/gce-cpu-only-perf-10-node.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE performance testnets CPU ONLY" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-cpu-only" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 10 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a,us-west1-b,us-central1-a,europe-west4-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/performance-testcases/gce-cpu-only-perf-5-node-single-region.yml b/system-test/performance-testcases/gce-cpu-only-perf-5-node-single-region.yml deleted file mode 100755 index 79f43abfd375fb..00000000000000 --- a/system-test/performance-testcases/gce-cpu-only-perf-5-node-single-region.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - CPU Only 5 Node Single Zone" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-cpu-only" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git 
a/system-test/performance-testcases/gce-cpu-only-perf-5-node.yml b/system-test/performance-testcases/gce-cpu-only-perf-5-node.yml deleted file mode 100755 index 6448942f578f6a..00000000000000 --- a/system-test/performance-testcases/gce-cpu-only-perf-5-node.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - CPU Only 5 Node" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-cpu-only" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a,us-west1-b,us-central1-a,europe-west4-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/performance-testcases/gce-gpu-perf-10-node-single-region.yml b/system-test/performance-testcases/gce-gpu-perf-10-node-single-region.yml deleted file mode 100755 index 8206c889c7bc40..00000000000000 --- a/system-test/performance-testcases/gce-gpu-perf-10-node-single-region.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - GPU Enabled 10 Nodes" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-gpu-enabled" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 10 - ENABLE_GPU: "true" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16 --accelerator count=2,type=nvidia-tesla-v100" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff 
--git a/system-test/performance-testcases/gce-gpu-perf-10-node.yml b/system-test/performance-testcases/gce-gpu-perf-10-node.yml deleted file mode 100755 index c15ac2d259c3a2..00000000000000 --- a/system-test/performance-testcases/gce-gpu-perf-10-node.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - GPU Enabled 10 Nodes" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-gpu-enabled" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 10 - ENABLE_GPU: "true" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16 --accelerator count=2,type=nvidia-tesla-v100" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a,us-west1-b,us-central1-a,europe-west4-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/performance-testcases/gce-gpu-perf-25-node-single-region.yml b/system-test/performance-testcases/gce-gpu-perf-25-node-single-region.yml deleted file mode 100755 index 2a7657d7301cde..00000000000000 --- a/system-test/performance-testcases/gce-gpu-perf-25-node-single-region.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - GPU Enabled 25 Nodes" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-gpu-enabled" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 25 - ENABLE_GPU: "true" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16 --accelerator count=2,type=nvidia-tesla-v100" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: 
"fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/performance-testcases/gce-gpu-perf-25-node.yml b/system-test/performance-testcases/gce-gpu-perf-25-node.yml deleted file mode 100755 index eed6b07fad5835..00000000000000 --- a/system-test/performance-testcases/gce-gpu-perf-25-node.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - GPU Enabled 25 Nodes" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-gpu-enabled" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 25 - ENABLE_GPU: "true" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16 --accelerator count=2,type=nvidia-tesla-v100" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a,us-west1-b,us-central1-a,europe-west4-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/performance-testcases/gce-gpu-perf-5-node-single-region.yml b/system-test/performance-testcases/gce-gpu-perf-5-node-single-region.yml deleted file mode 100755 index 4ba94e1aaeeedc..00000000000000 --- a/system-test/performance-testcases/gce-gpu-perf-5-node-single-region.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - GPU Enabled 5 Nodes" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-gpu-enabled" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "true" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16 --accelerator count=2,type=nvidia-tesla-v100" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a" - USE_PUBLIC_IP_ADDRESSES: "false" - 
ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/performance-testcases/gce-gpu-perf-5-node.yml b/system-test/performance-testcases/gce-gpu-perf-5-node.yml deleted file mode 100755 index 54d1ef4c2ca9f6..00000000000000 --- a/system-test/performance-testcases/gce-gpu-perf-5-node.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - GPU Enabled 5 Nodes" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-gpu-enabled" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "true" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16 --accelerator count=2,type=nvidia-tesla-v100" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a,us-west1-b,us-central1-a,europe-west4-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/performance-testcases/gce-gpu-perf-50-node-single-region.yml b/system-test/performance-testcases/gce-gpu-perf-50-node-single-region.yml deleted file mode 100755 index 3b32a354202898..00000000000000 --- a/system-test/performance-testcases/gce-gpu-perf-50-node-single-region.yml +++ /dev/null @@ -1,21 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - GPU Enabled 50 Nodes" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-gpu-enabled" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 50 - ENABLE_GPU: "true" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16 --accelerator count=2,type=nvidia-tesla-v100" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: 
"us-west1-a" - ALLOW_BOOT_FAILURES: "true" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/performance-testcases/gce-gpu-perf-50-node.yml b/system-test/performance-testcases/gce-gpu-perf-50-node.yml deleted file mode 100755 index b15dc697b056ac..00000000000000 --- a/system-test/performance-testcases/gce-gpu-perf-50-node.yml +++ /dev/null @@ -1,21 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - GPU Enabled 50 Nodes" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-perf-gpu-enabled" - TEST_DURATION_SECONDS: 600 - NUMBER_OF_VALIDATOR_NODES: 50 - ENABLE_GPU: "true" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16 --accelerator count=2,type=nvidia-tesla-v100" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a,us-west1-b,us-central1-a,europe-west4-a" - ALLOW_BOOT_FAILURES: "true" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/performance-testcases/tps-report-single-region.yml b/system-test/performance-testcases/tps-report-single-region.yml deleted file mode 100644 index e2a5ba9d8b08dd..00000000000000 --- a/system-test/performance-testcases/tps-report-single-region.yml +++ /dev/null @@ -1,23 +0,0 @@ -steps: - - command: "buildkite-agent pipeline upload system-test/performance-testcases/gce-gpu-perf-5-node-single-region.yml" - name: "5 Node Test - TPS Report - Single Region" - agents: - queue: "pipeline-uploader" - - wait: ~ - continue_on_failure: true - - command: "buildkite-agent pipeline upload system-test/performance-testcases/gce-gpu-perf-10-node-single-region.yml" - name: "10 Node Test - TPS Report - Single Region" - agents: - queue: "pipeline-uploader" - - 
wait: ~ - continue_on_failure: true - - command: "buildkite-agent pipeline upload system-test/performance-testcases/gce-gpu-perf-25-node-single-region.yml" - name: "25 Node Test - TPS Report - Single Region" - agents: - queue: "pipeline-uploader" - - wait: ~ - continue_on_failure: true - - command: "buildkite-agent pipeline upload system-test/performance-testcases/gce-gpu-perf-50-node-single-region.yml" - name: "50 Node Test - TPS Report - Single Region" - agents: - queue: "pipeline-uploader" diff --git a/system-test/performance-testcases/tps-report.yml b/system-test/performance-testcases/tps-report.yml deleted file mode 100644 index d5aad75fd11b31..00000000000000 --- a/system-test/performance-testcases/tps-report.yml +++ /dev/null @@ -1,23 +0,0 @@ -steps: - - command: "buildkite-agent pipeline upload system-test/performance-testcases/gce-gpu-perf-5-node.yml" - name: "5 Node Test - TPS Report" - agents: - queue: "pipeline-uploader" - - wait: ~ - continue_on_failure: true - - command: "buildkite-agent pipeline upload system-test/performance-testcases/gce-gpu-perf-10-node.yml" - name: "10 Node Test - TPS Report" - agents: - queue: "pipeline-uploader" - - wait: ~ - continue_on_failure: true - - command: "buildkite-agent pipeline upload system-test/performance-testcases/gce-gpu-perf-25-node.yml" - name: "25 Node Test - TPS Report" - agents: - queue: "pipeline-uploader" - - wait: ~ - continue_on_failure: true - - command: "buildkite-agent pipeline upload system-test/performance-testcases/gce-gpu-perf-50-node.yml" - name: "50 Node Test - TPS Report" - agents: - queue: "pipeline-uploader" diff --git a/system-test/restart-testcases/restart_gce.yml b/system-test/restart-testcases/restart_gce.yml deleted file mode 100755 index b187fa5f4d4360..00000000000000 --- a/system-test/restart-testcases/restart_gce.yml +++ /dev/null @@ -1,19 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Restart test on GCE" - env: - UPLOAD_RESULTS_TO_SLACK: "false" - 
CLOUD_PROVIDER: "gce" - ENABLE_GPU: "false" - NUMBER_OF_VALIDATOR_NODES: 4 - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - ADDITIONAL_FLAGS: "--dedicated" - SKIP_PERF_RESULTS: "true" - EXTRA_PRIMORDIAL_STAKES: 4 - WAIT_FOR_EQUAL_STAKE: "true" - TEST_TYPE: "script" - CONSENSUS_TIMEOUT: 60 - CUSTOM_SCRIPT: "system-test/restart-testcases/restart_test_automation.sh" - agents: - queue: "gce-deploy" diff --git a/system-test/restart-testcases/restart_test_automation.sh b/system-test/restart-testcases/restart_test_automation.sh deleted file mode 100755 index 20b7dec4957ad9..00000000000000 --- a/system-test/restart-testcases/restart_test_automation.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -# shellcheck disable=SC1090 -# shellcheck disable=SC1091 -source "$(dirname "$0")"/../automation_utils.sh - -RESULT_FILE="$1" - -if [[ -z $CONSENSUS_TIMEOUT ]]; then - CONSENSUS_TIMEOUT=180 -fi - -startGpuMode="off" -if [[ -z $ENABLE_GPU ]]; then - ENABLE_GPU=false -fi -if [[ "$ENABLE_GPU" = "true" ]]; then - startGpuMode="on" -fi - -declare maybeAsyncNodeInit -if [[ "$ASYNC_NODE_INIT" = "true" ]]; then - maybeAsyncNodeInit="--async-node-init" -fi - -# Restart the network -"$REPO_ROOT"/net/net.sh stop - -sleep 2 - -# shellcheck disable=SC2086 -"$REPO_ROOT"/net/net.sh start --skip-setup --no-snapshot-fetch --no-deploy \ - --gpu-mode $startGpuMode $maybeAsyncNodeInit - -# wait until consensus -start=$SECONDS -activeStake=$(get_active_stake) -while [[ $((SECONDS - start)) -lt $CONSENSUS_TIMEOUT ]]; do - currentStake=$(get_current_stake) - echo "$((SECONDS - start))s: Current stake $currentStake, Active stake $activeStake" >> "$RESULT_FILE" - if [[ $activeStake -eq $currentStake ]]; then - echo "Restart Test Succeeded" >>"$RESULT_FILE" - exit 0 - fi - sleep 5 -done - -echo "Could not establish consensus in $CONSENSUS_TIMEOUT seconds" >> "$RESULT_FILE" -exit 1 diff --git a/system-test/rolling-upgrade/rolling_upgrade.sh 
b/system-test/rolling-upgrade/rolling_upgrade.sh deleted file mode 100755 index 2fd7b1cdbf6252..00000000000000 --- a/system-test/rolling-upgrade/rolling_upgrade.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -REPO_ROOT=${DIR}/../.. - -# shellcheck source=/dev/null # Ignore generated source target -source "${REPO_ROOT}/net/config/config" -# shellcheck source=system-test/automation_utils.sh -source "${REPO_ROOT}/system-test/automation_utils.sh" - -NET_SH="${REPO_ROOT}/net/net.sh" - -set -x - -: "${UPGRADE_INITIAL_DELAY:=0}" # Time to wait before starting rolling upgrade -: "${UPGRADE_INTERVALIDATOR_DELAY:?}" # Time to wait between validators during upgrade -: "${UPGRADE_POST_TEST_DELAY:=0}" #Time to wait after upgrade - -sleep_if_positive() { - declare delay=$1 - if [[ "$delay" -gt 0 ]]; then - sleep "$delay" - fi -} - -# Fetch new software and upload it to the bootstrap validator -declare -g sw_version_args -get_net_launch_software_version_launch_args "$UPGRADE_CHANNEL" "upgrade-release" sw_version_args -# shellcheck disable=2086 # $sw_version_args holds two args. Don't quote! 
-"$NET_SH" upgrade $sw_version_args - -# Wait initial delay, if any -sleep_if_positive "$UPGRADE_INITIAL_DELAY" - -if [[ "$UPGRADE_INITIAL_DELAY" -gt 0 ]]; then - "$NET_SH" sanity -fi - -# Restart validators one by one -# shellcheck disable=SC2154 # sourced from config above -for i in "${!validatorIpList[@]}"; do - if [[ "$i" -eq 0 ]]; then - # net.sh doesn't support restarting the bootstrap validator yet - continue - fi - - declare ipAddress="${validatorIpList[$i]}" - - "$NET_SH" stopnode -i "$ipAddress" - "$NET_SH" startnode -r -i "$ipAddress" - - # This could be replaced with something based on `solana catchup` - sleep_if_positive "$UPGRADE_INTERVALIDATOR_DELAY" - - "$NET_SH" sanity -done - -sleep_if_positive "$UPGRADE_POST_TEST_DELAY" - -if [[ "$UPGRADE_POST_TEST_DELAY" -gt 0 ]]; then - "$NET_SH" sanity -fi diff --git a/system-test/sanity-testcases/colo-cpu-only-quick-sanity-test.yml b/system-test/sanity-testcases/colo-cpu-only-quick-sanity-test.yml deleted file mode 100755 index 12b127b2c39218..00000000000000 --- a/system-test/sanity-testcases/colo-cpu-only-quick-sanity-test.yml +++ /dev/null @@ -1,18 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "SANITY - Short duration Colo perf sanity. 1 val, 1 client, CPU-only." 
- env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - ENABLE_GPU: "false" - TEST_DURATION_SECONDS: 60 - NUMBER_OF_VALIDATOR_NODES: 1 - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - BOOTSTRAP_VALIDATOR_MAX_STAKE_THRESHOLD: 99 - TEST_TYPE: "fixed_duration" - agents: - queue: "colo-deploy" diff --git a/system-test/sanity-testcases/colo-partition-sanity-test.yml b/system-test/sanity-testcases/colo-partition-sanity-test.yml deleted file mode 100755 index 3610dc8c131722..00000000000000 --- a/system-test/sanity-testcases/colo-partition-sanity-test.yml +++ /dev/null @@ -1,22 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "SANITY TEST - Colo - CPU Only - Partial Loss 3 Partitions" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - NUMBER_OF_VALIDATOR_NODES: 2 - ENABLE_GPU: "false" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - APPLY_PARTITIONS: "true" - NETEM_CONFIG_FILE: "system-test/netem-configs/partial-loss-three-partitions" - PARTITION_ACTIVE_DURATION: 30 - PARTITION_INACTIVE_DURATION: 30 - PARTITION_ITERATION_COUNT: 2 - BOOTSTRAP_VALIDATOR_MAX_STAKE_THRESHOLD: 66 - TEST_TYPE: "partition" - agents: - queue: "colo-deploy" diff --git a/system-test/stability-testcases/.gitignore b/system-test/stability-testcases/.gitignore deleted file mode 100644 index 1790fda312066e..00000000000000 --- a/system-test/stability-testcases/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/releases/ diff --git a/system-test/stability-testcases/colo-long-duration-cpu-only-perf.yml b/system-test/stability-testcases/colo-long-duration-cpu-only-perf.yml deleted file mode 100755 index 2dc0af943f9450..00000000000000 --- 
a/system-test/stability-testcases/colo-long-duration-cpu-only-perf.yml +++ /dev/null @@ -1,17 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "COLO 1 hour performance & stability CPU only" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-cpu-only" - ENABLE_GPU: "false" - TEST_DURATION_SECONDS: 3600 - NUMBER_OF_VALIDATOR_NODES: 3 - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "colo-deploy" diff --git a/system-test/stability-testcases/colo-long-duration-gpu-perf.yml b/system-test/stability-testcases/colo-long-duration-gpu-perf.yml deleted file mode 100755 index c5f0c1af52c9ff..00000000000000 --- a/system-test/stability-testcases/colo-long-duration-gpu-perf.yml +++ /dev/null @@ -1,17 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "COLO 1 hour performance & stability GPU enabled" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - TESTNET_TAG: "colo-perf-gpu-enabled" - ENABLE_GPU: "true" - TEST_DURATION_SECONDS: 3600 - NUMBER_OF_VALIDATOR_NODES: 3 - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - ADDITIONAL_FLAGS: "" - TEST_TYPE: "fixed_duration" - agents: - queue: "colo-deploy" diff --git a/system-test/stability-testcases/gce-perf-stability-5-node-single-region.yml b/system-test/stability-testcases/gce-perf-stability-5-node-single-region.yml deleted file mode 100755 index 1441a43700c391..00000000000000 --- a/system-test/stability-testcases/gce-perf-stability-5-node-single-region.yml +++ /dev/null @@ -1,20 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - 1 hour perf stability" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: 
"gce" - TESTNET_TAG: "gce-perf-cpu-only" - TEST_DURATION_SECONDS: 3600 - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 1 - CLIENT_OPTIONS: "bench-tps=1=--tx_count 10000 --thread-batch-sleep-ms 250" - TESTNET_ZONES: "us-west1-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "gce-deploy" diff --git a/system-test/stability-testcases/gce-stability-5-node.yml b/system-test/stability-testcases/gce-stability-5-node.yml deleted file mode 100755 index 2d841715ae9c83..00000000000000 --- a/system-test/stability-testcases/gce-stability-5-node.yml +++ /dev/null @@ -1,19 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "GCE - 8 hour Stability - 5 Node" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - TESTNET_TAG: "gce-stability" - TEST_DURATION_SECONDS: 28800 - NUMBER_OF_VALIDATOR_NODES: 5 - ENABLE_GPU: "false" - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 0 - TESTNET_ZONES: "us-west1-a,us-west1-b,us-central1-a,europe-west4-a" - USE_PUBLIC_IP_ADDRESSES: "false" - ADDITIONAL_FLAGS: "--dedicated" - TEST_TYPE: "fixed_duration" - agents: - queue: "stability-deploy" diff --git a/system-test/stability-testcases/gossip-dos-test.sh b/system-test/stability-testcases/gossip-dos-test.sh deleted file mode 100755 index 68c3c540d5948c..00000000000000 --- a/system-test/stability-testcases/gossip-dos-test.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash - -set -e -cd "$(dirname "$0")" -SOLANA_ROOT="$(cd ../..; pwd)" - -logDir="$PWD"/logs -rm -rf "$logDir" -mkdir "$logDir" - -solanaInstallDataDir=$PWD/releases -solanaInstallGlobalOpts=( - --data-dir "$solanaInstallDataDir" - --config "$solanaInstallDataDir"/config.yml - --no-modify-path -) - -# Install all the solana versions -bootstrapInstall() { - declare 
v=$1 - if [[ ! -h $solanaInstallDataDir/active_release ]]; then - sh "$SOLANA_ROOT"/install/agave-install-init.sh "$v" "${solanaInstallGlobalOpts[@]}" - fi - export PATH="$solanaInstallDataDir/active_release/bin/:$PATH" -} - -bootstrapInstall "edge" -agave-install-init --version -agave-install-init edge -solana-gossip --version -solana-dos --version - -killall solana-gossip || true -solana-gossip spy --gossip-port 8001 > "$logDir"/gossip.log 2>&1 & -solanaGossipPid=$! -echo "solana-gossip pid: $solanaGossipPid" -sleep 5 -solana-dos --mode gossip --data-type random --data-size 1232 & -dosPid=$! -echo "solana-dos pid: $dosPid" - -pass=true - -SECONDS= -while ((SECONDS < 600)); do - if ! kill -0 $solanaGossipPid; then - echo "solana-gossip is no longer running after $SECONDS seconds" - pass=false - break - fi - if ! kill -0 $dosPid; then - echo "solana-dos is no longer running after $SECONDS seconds" - pass=false - break - fi - sleep 1 -done - -kill $solanaGossipPid || true -kill $dosPid || true -wait || true - -$pass && echo Pass diff --git a/system-test/stability-testcases/gossip-dos-test.yml b/system-test/stability-testcases/gossip-dos-test.yml deleted file mode 100644 index 46e2711d050ccf..00000000000000 --- a/system-test/stability-testcases/gossip-dos-test.yml +++ /dev/null @@ -1,5 +0,0 @@ -steps: - - command: "system-test/stability-testcases/gossip-dos-test.sh" - label: "Gossip DoS Test" - artifact_paths: - - "system-test/stability-testcases/logs/*" diff --git a/system-test/stake-operations-testcases/offline_stake_colo.yml b/system-test/stake-operations-testcases/offline_stake_colo.yml deleted file mode 100755 index df4737a1b624c2..00000000000000 --- a/system-test/stake-operations-testcases/offline_stake_colo.yml +++ /dev/null @@ -1,18 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Running Offline Stake Operations Tests on Colo" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "colo" - 
ENABLE_GPU: "false" - TEST_DURATION_SECONDS: 30 - NUMBER_OF_VALIDATOR_NODES: 1 - NUMBER_OF_CLIENT_NODES: 0 - ADDITIONAL_FLAGS: "" - BOOTSTRAP_VALIDATOR_MAX_STAKE_THRESHOLD: 100 - SKIP_PERF_RESULTS: "true" - TEST_TYPE: "script" - CUSTOM_SCRIPT: "system-test/stake-operations-testcases/stake_test_automation.sh" - agents: - queue: "colo-deploy" diff --git a/system-test/stake-operations-testcases/offline_stake_gce.yml b/system-test/stake-operations-testcases/offline_stake_gce.yml deleted file mode 100755 index fb2fcc695e6f9c..00000000000000 --- a/system-test/stake-operations-testcases/offline_stake_gce.yml +++ /dev/null @@ -1,19 +0,0 @@ -steps: - - command: "system-test/testnet-automation.sh" - label: "Running Offline Stake Operations Tests on GCE" - env: - UPLOAD_RESULTS_TO_SLACK: "true" - UPLOAD_RESULTS_TO_DISCORD: "true" - CLOUD_PROVIDER: "gce" - ENABLE_GPU: "false" - TEST_DURATION_SECONDS: 30 - NUMBER_OF_VALIDATOR_NODES: 1 - VALIDATOR_NODE_MACHINE_TYPE: "--machine-type n2-standard-16" - NUMBER_OF_CLIENT_NODES: 0 - ADDITIONAL_FLAGS: "--dedicated" - BOOTSTRAP_VALIDATOR_MAX_STAKE_THRESHOLD: 100 - SKIP_PERF_RESULTS: "true" - TEST_TYPE: "script" - CUSTOM_SCRIPT: "system-test/stake-operations-testcases/stake_test_automation.sh" - agents: - queue: "gce-deploy" diff --git a/system-test/stake-operations-testcases/offline_stake_operations.sh b/system-test/stake-operations-testcases/offline_stake_operations.sh deleted file mode 100755 index 513a168ab25766..00000000000000 --- a/system-test/stake-operations-testcases/offline_stake_operations.sh +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/env bash - -# shellcheck disable=SC2086 -# shellcheck disable=SC2068 -# shellcheck disable=SC2206 -# shellcheck disable=SC2162 -# shellcheck disable=SC2178 -# shellcheck disable=SC2145 - -# shellcheck disable=SC1090 -# shellcheck disable=SC1091 -source "$(dirname "$0")"/../automation_utils.sh - -set -e - -function get_signers_string() { - sign_only_output="$1" - - signers=() - while read LINE; 
do - signers+=( --signer $LINE ) - done <<<"$(sed -Ee $'s/^ ([a-km-zA-HJ-NP-Z1-9]{32,44}=[a-km-zA-HJ-NP-Z1-9]{64,88})$/\\1/\nt\nd' <<<"$sign_only_output")" - - echo "${signers[@]}" -} - -if [[ -n "$1" ]]; then - url="$1" -fi - -if [[ -z "$url" ]]; then - echo Provide complete URL, ex: "$0" http://api.devnet.solana.com:8899 - exit 1 -fi -solana config set --url $url - -# Create a dummy keypair file with no balance for operations that require a "client keypair file" to exist even if they don't touch it -dummy_keypair=dummy.json -solana-keygen new -o "$dummy_keypair" --no-passphrase --force --silent -solana config set --keypair $dummy_keypair - -### Offline stake account creation - -# The nonce account and the system account funding its creation are online -online_nonce_account_keypair=nonce_keypair.json -online_system_account_keypair=online_system_account_keypair.json - -solana-keygen new -o "$online_system_account_keypair" --no-passphrase --force --silent -solana-keygen new -o "$online_nonce_account_keypair" --no-passphrase --force --silent - -online_system_account_pubkey="$(solana-keygen pubkey $online_system_account_keypair)" -nonce_account_pubkey="$(solana-keygen pubkey $online_nonce_account_keypair)" - -# System account funding the stake account is offline, and the auth staker and withdrawer keypairs are offline -offline_system_account_keypair=offline_system_account_keypair.json -offline_staker_keypair=offline_staker_keypair.json -offline_withdrawer_keypair=offline_withdrawer_keypair.json -offline_custodian_keypair=offline_custodian_keypair.json - -solana-keygen new -o "$offline_system_account_keypair" --no-passphrase --force --silent -solana-keygen new -o "$offline_staker_keypair" --no-passphrase --force --silent -solana-keygen new -o "$offline_withdrawer_keypair" --no-passphrase --force --silent -solana-keygen new -o "$offline_custodian_keypair" --no-passphrase --force --silent - -offline_system_account_pubkey="$(solana-keygen pubkey 
$offline_system_account_keypair)" -offline_withdrawer_pubkey="$(solana-keygen pubkey $offline_withdrawer_keypair)" -offline_staker_pubkey="$(solana-keygen pubkey $offline_staker_keypair)" -offline_custodian_pubkey="$(solana-keygen pubkey $offline_custodian_keypair)" - -# Airdrop some funds to the offline account. -solana airdrop 100 $offline_system_account_pubkey -solana airdrop 2 $online_system_account_pubkey - -# Create a nonce account funded by the online account, with the authority given to the offline account -solana create-nonce-account $online_nonce_account_keypair 1 --nonce-authority $offline_system_account_pubkey --keypair $online_system_account_keypair -nonce="$(solana nonce $nonce_account_pubkey)" - -execution_step OFFLINE SYSTEM ACCOUNT BALANCE BEFORE CREATING STAKE ACCOUNTS -( - set -x - solana balance $offline_system_account_pubkey -) - -################################ -execution_step CREATE OFFLINE STAKE ACCOUNT -################################ - -# Create a stake account funded by the offline system account - -stake_account_keypair=stake_account_keypair.json -solana-keygen new -o "$stake_account_keypair" --no-passphrase --force --silent -stake_account_address="$(solana-keygen pubkey $stake_account_keypair)" - -sign_only="$(solana create-stake-account $stake_account_keypair 50 \ - --sign-only --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_keypair \ - --stake-authority $offline_staker_pubkey --withdraw-authority $offline_withdrawer_pubkey \ - --custodian $offline_custodian_pubkey \ - --lockup-epoch 999 \ - --keypair $offline_system_account_keypair --url http://0.0.0.0)" - -signers="$(get_signers_string "${sign_only[@]}")" - -solana create-stake-account $stake_account_keypair 50 \ - --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_pubkey \ - --stake-authority $offline_staker_pubkey --withdraw-authority $offline_withdrawer_pubkey \ - --custodian 
$offline_custodian_pubkey \ - --lockup-epoch 999 \ - --from $offline_system_account_pubkey --fee-payer $offline_system_account_pubkey ${signers[@]} - -execution_step VIEW STAKE ACCOUNT AFTER CREATION -( - set -x - solana stake-account $stake_account_address -) - - -execution_step VIEW OFFLINE SYSTEM ACCOUNT BALANCE AFTER CREATING FIRST STAKE ACCOUNT -( - set -x - solana balance $offline_system_account_pubkey -) - -##################### -execution_step SPLIT STAKE OFFLINE -##################### - -# Split the original stake account before delegating - -split_stake_account_keypair=split_stake_account_keypair.json -solana-keygen new -o $split_stake_account_keypair --no-passphrase --force --silent -split_stake_account_address=$(solana-keygen pubkey $split_stake_account_keypair) - -nonce="$(solana nonce $nonce_account_pubkey)" - -sign_only="$(solana split-stake --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_keypair \ - --stake-authority $offline_staker_keypair $stake_account_address $split_stake_account_keypair 10 \ - --keypair $offline_system_account_keypair --sign-only --url http://0.0.0.0)" - -signers="$(get_signers_string "${sign_only[@]}")" - -solana split-stake --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_pubkey \ - --stake-authority $offline_staker_pubkey $stake_account_address $split_stake_account_keypair 10 \ - --fee-payer $offline_system_account_pubkey ${signers[@]} - -execution_step VIEW ORIGINAL STAKE ACCOUNT AFTER SPLITTING -( - set -x - solana stake-account $stake_account_address -) - -execution_step VIEW NEW STAKE ACCOUNT CREATED FROM SPLITTING ORIGINAL -( - set -x - solana stake-account $split_stake_account_address -) - -##################### -execution_step CHANGE CUSTODIAN LOCKUP -##################### - -# Set the lockup epoch to 0 to allow stake to be withdrawn - -nonce="$(solana nonce $nonce_account_pubkey)" - -sign_only="$(solana stake-set-lockup --blockhash 
$nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_keypair \ - $split_stake_account_address --custodian $offline_custodian_keypair --lockup-epoch 0 \ - --keypair $offline_system_account_keypair --sign-only --url http://0.0.0.0)" - -signers="$(get_signers_string "${sign_only[@]}")" - -solana stake-set-lockup --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_keypair \ - $split_stake_account_address --custodian $offline_custodian_pubkey --lockup-epoch 0 \ - --fee-payer $offline_system_account_pubkey ${signers[@]} - -execution_step VIEW SPLIT STAKE ACCOUNT AFTER CHANGING LOCKUP -( - set -x - solana stake-account $split_stake_account_address -) - -########################## -execution_step OFFLINE STAKE WITHDRAWAL -########################## - -# Withdraw the lamports from the stake account that was split off and return them to the offline system account - -nonce="$(solana nonce $nonce_account_pubkey)" - -sign_only="$(solana withdraw-stake --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_keypair \ - $split_stake_account_address $offline_system_account_pubkey 10 \ - --withdraw-authority $offline_withdrawer_keypair \ - --keypair $offline_system_account_keypair --sign-only --url http://0.0.0.0)" - -signers="$(get_signers_string "${sign_only[@]}")" - -solana withdraw-stake --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_pubkey \ - $split_stake_account_address $offline_system_account_pubkey 10 \ - --withdraw-authority $offline_withdrawer_pubkey \ - --fee-payer $offline_system_account_pubkey ${signers[@]} - -execution_step VIEW OFFLINE SYSTEM ACCOUNT BALANCE AFTER WITHDRAWING SPLIT STAKE -( - set -x - solana balance $offline_system_account_pubkey -) - -########################## -execution_step OFFLINE STAKE DELEGATION -########################## - -# Delegate stake from the original account to a vote account - 
-vote_account_pubkey="$(awk '{if(NR==4) print $2}'<<<"$(solana show-validators)")" -nonce="$(solana nonce $nonce_account_pubkey)" - -# Sign a stake delegation, assuming the authorized staker is held offline -sign_only="$(solana delegate-stake --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_keypair \ ---stake-authority $offline_staker_keypair $stake_account_address $vote_account_pubkey \ ---keypair $offline_system_account_keypair --sign-only --url http://0.0.0.0)" - -signers="$(get_signers_string "${sign_only[@]}")" - -# Send the signed transaction on the cluster -solana delegate-stake --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_pubkey \ ---stake-authority $offline_staker_pubkey $stake_account_address $vote_account_pubkey \ ---fee-payer $offline_system_account_pubkey ${signers[@]} - -execution_step VIEW ORIGINAL STAKE ACCOUNT AFTER DELEGATION -( - set -x - solana stake-account $stake_account_address -) - -########################## - execution_step OFFLINE STAKE DEACTIVATION -########################## - -# Deactivate delegated stake - -nonce="$(solana nonce $nonce_account_pubkey)" - -# Sign a stake delegation, assuming the authorized staker is held offline -sign_only="$(solana deactivate-stake --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_keypair \ ---stake-authority $offline_staker_keypair $stake_account_address \ ---keypair $offline_system_account_keypair --sign-only --url http://0.0.0.0)" - -signers="$(get_signers_string "${sign_only[@]}")" - -# Send the signed transaction on the cluster -solana deactivate-stake --blockhash $nonce --nonce $nonce_account_pubkey --nonce-authority $offline_system_account_pubkey \ ---stake-authority $offline_staker_pubkey $stake_account_address \ ---fee-payer $offline_system_account_pubkey ${signers[@]} - -execution_step VIEW ORIGINAL STAKE ACCOUNT AFTER DEACTIVATION -( - set -x - solana 
stake-account $stake_account_address -) diff --git a/system-test/stake-operations-testcases/stake_test_automation.sh b/system-test/stake-operations-testcases/stake_test_automation.sh deleted file mode 100755 index 77dd534343e68c..00000000000000 --- a/system-test/stake-operations-testcases/stake_test_automation.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -# shellcheck disable=SC1090 -# shellcheck disable=SC1091 -source "$(dirname "$0")"/../automation_utils.sh - -RESULT_FILE="$1" - -# Runs offline stake operations tests against a running cluster launched from the automation framework -bootstrapper_ip_address="$(get_bootstrap_validator_ip_address)" -entrypoint=http://"${bootstrapper_ip_address}":8899 -PATH="$REPO_ROOT"/solana-release/bin:$PATH "$REPO_ROOT"/system-test/stake-operations-testcases/offline_stake_operations.sh "$entrypoint" - -echo "Offline Stake Operations Test Succeeded" >>"$RESULT_FILE" diff --git a/system-test/testnet-automation-json-parser-missing.py b/system-test/testnet-automation-json-parser-missing.py deleted file mode 100644 index 47790c956fea4a..00000000000000 --- a/system-test/testnet-automation-json-parser-missing.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python3 -import sys, json - -data=json.load(sys.stdin) - -# this code is designed for influx queries where 'no data' means 0 -if 'results' in data: - for result in data['results']: - val = "0" - if 'series' in result: - val = str(result['series'][0]['values'][0][1]) - print(val) -else: - print("No results returned from CURL request") diff --git a/system-test/testnet-automation-json-parser.py b/system-test/testnet-automation-json-parser.py deleted file mode 100755 index 37959b025855b0..00000000000000 --- a/system-test/testnet-automation-json-parser.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python3 -import sys, json, argparse - -parser = argparse.ArgumentParser() -parser.add_argument("--empty_error", action="store_true", help="If present, do not print 
error message") -args = parser.parse_args() - -data=json.load(sys.stdin) - -if 'results' in data: - for result in data['results']: - if 'series' in result: - print(result['series'][0]['columns'][1] + ': ' + str(result['series'][0]['values'][0][1])) - elif not args.empty_error: - print("An expected result from CURL request is missing") -elif not args.empty_error: - print("No results returned from CURL request") diff --git a/system-test/testnet-automation.sh b/system-test/testnet-automation.sh deleted file mode 100755 index e4256b7a7f0e82..00000000000000 --- a/system-test/testnet-automation.sh +++ /dev/null @@ -1,380 +0,0 @@ -#!/usr/bin/env bash -set -e - -# shellcheck disable=SC1090 -# shellcheck disable=SC1091 -source "$(dirname "$0")"/automation_utils.sh - -function cleanup_testnet { - RC=$? - if [[ $RC != 0 ]]; then - RESULT_DETAILS=" -Test failed during step: -${STEP} - -Failure occurred when running the following command: -$*" - fi - -# shellcheck disable=SC2034 - TESTNET_FINISH_UNIX_MSECS="$(($(date +%s%N)/1000000))" - if [[ "$UPLOAD_RESULTS_TO_SLACK" = "true" ]]; then - upload_results_to_slack - fi - - if [[ "$UPLOAD_RESULTS_TO_DISCORD" = "true" ]]; then - upload_results_to_discord - fi - - ( - execution_step "Collecting Logfiles from Nodes" - collect_logs - ) || echo "Error from collecting logs" - - ( - execution_step "Stop Network Software" - "${REPO_ROOT}"/net/net.sh stop - ) || echo "Error from stopping nodes" - - ( - analyze_packet_loss - ) || echo "Error from packet loss analysis" - - execution_step "Deleting Testnet" - if test -f "${REPO_ROOT}"/net/"${CLOUD_PROVIDER}".sh; then - "${REPO_ROOT}"/net/"${CLOUD_PROVIDER}".sh delete -p "${TESTNET_TAG}" - fi -} -trap 'cleanup_testnet $BASH_COMMAND' EXIT - -function launch_testnet() { - set -x - - # shellcheck disable=SC2068 - execution_step "Create ${NUMBER_OF_VALIDATOR_NODES} ${CLOUD_PROVIDER} nodes" - - case $CLOUD_PROVIDER in - gce) - if [[ -z $VALIDATOR_NODE_MACHINE_TYPE ]]; then - echo 
VALIDATOR_NODE_MACHINE_TYPE not defined - exit 1 - fi - # shellcheck disable=SC2068 - # shellcheck disable=SC2086 - "${REPO_ROOT}"/net/gce.sh create \ - -d pd-ssd \ - -n "$NUMBER_OF_VALIDATOR_NODES" -c "$NUMBER_OF_CLIENT_NODES" \ - $maybeCustomMachineType "$VALIDATOR_NODE_MACHINE_TYPE" $maybeEnableGpu \ - -p "$TESTNET_TAG" $maybeCreateAllowBootFailures $maybePublicIpAddresses \ - ${TESTNET_CLOUD_ZONES[@]/#/"-z "} \ - --self-destruct-hours 0 \ - ${ADDITIONAL_FLAGS[@]/#/" "} - ;; - ec2) - # shellcheck disable=SC2068 - # shellcheck disable=SC2086 - "${REPO_ROOT}"/net/ec2.sh create \ - -n "$NUMBER_OF_VALIDATOR_NODES" -c "$NUMBER_OF_CLIENT_NODES" \ - $maybeCustomMachineType "$VALIDATOR_NODE_MACHINE_TYPE" $maybeEnableGpu \ - -p "$TESTNET_TAG" $maybeCreateAllowBootFailures $maybePublicIpAddresses \ - ${TESTNET_CLOUD_ZONES[@]/#/"-z "} \ - ${ADDITIONAL_FLAGS[@]/#/" "} - ;; - azure) - # shellcheck disable=SC2068 - # shellcheck disable=SC2086 - "${REPO_ROOT}"/net/azure.sh create \ - -n "$NUMBER_OF_VALIDATOR_NODES" -c "$NUMBER_OF_CLIENT_NODES" \ - $maybeCustomMachineType "$VALIDATOR_NODE_MACHINE_TYPE" $maybeEnableGpu \ - -p "$TESTNET_TAG" $maybeCreateAllowBootFailures $maybePublicIpAddresses \ - ${TESTNET_CLOUD_ZONES[@]/#/"-z "} \ - ${ADDITIONAL_FLAGS[@]/#/" "} - ;; - colo) - "${REPO_ROOT}"/net/colo.sh delete --reclaim-preemptible-reservations - # shellcheck disable=SC2068 - # shellcheck disable=SC2086 - "${REPO_ROOT}"/net/colo.sh create \ - -n "$NUMBER_OF_VALIDATOR_NODES" -c "$NUMBER_OF_CLIENT_NODES" $maybeEnableGpu \ - -p "$TESTNET_TAG" $maybePublicIpAddresses --dedicated \ - ${ADDITIONAL_FLAGS[@]/#/" "} - ;; - bare) - ;; - *) - echo "Error: Unsupported cloud provider: $CLOUD_PROVIDER" - ;; - esac - - execution_step "Configure database" - "${REPO_ROOT}"/net/init-metrics.sh -e - - execution_step "Fetch reusable testnet keypairs" - if [[ ! 
-d "${REPO_ROOT}"/net/keypairs ]]; then -# git clone https://github.com/solana-labs/testnet-keypairs.git "${REPO_ROOT}"/net/keypairs - git clone git@github.com:solana-labs/testnet-keypairs.git "${REPO_ROOT}"/net/keypairs - # If we have provider-specific keys (CoLo*, GCE*, etc) use them instead of generic val* - if [[ -d "${REPO_ROOT}"/net/keypairs/"${CLOUD_PROVIDER}" ]]; then - cp "${REPO_ROOT}"/net/keypairs/"${CLOUD_PROVIDER}"/* "${REPO_ROOT}"/net/keypairs/ - fi - fi - - if [[ "$CLOUD_PROVIDER" = "colo" ]]; then - execution_step "Stopping Colo nodes before we start" - "${REPO_ROOT}"/net/net.sh stop - fi - - execution_step "Starting bootstrap node and ${NUMBER_OF_VALIDATOR_NODES} validator nodes" - - declare -g version_args - get_net_launch_software_version_launch_args "$CHANNEL" "solana-release" version_args - - declare maybeWarpSlot - if [[ -n "$WARP_SLOT" ]]; then - maybeWarpSlot="--warp-slot $WARP_SLOT" - fi - - declare maybeAsyncNodeInit - if [[ "$ASYNC_NODE_INIT" = "true" ]]; then - maybeAsyncNodeInit="--async-node-init" - fi - - declare maybeExtraPrimordialStakes - if [[ -n "$EXTRA_PRIMORDIAL_STAKES" ]]; then - maybeExtraPrimordialStakes="--extra-primordial-stakes $EXTRA_PRIMORDIAL_STAKES" - fi - - # shellcheck disable=SC2068 - # shellcheck disable=SC2086 - "${REPO_ROOT}"/net/net.sh start $version_args \ - -c idle=$NUMBER_OF_CLIENT_NODES $maybeStartAllowBootFailures \ - --gpu-mode $startGpuMode $maybeWarpSlot $maybeAsyncNodeInit \ - $maybeExtraPrimordialStakes - - if [[ -n "$WAIT_FOR_EQUAL_STAKE" ]]; then - wait_for_equal_stake - else - execution_step "Waiting for bootstrap validator's stake to fall below ${BOOTSTRAP_VALIDATOR_MAX_STAKE_THRESHOLD}%" - wait_for_max_stake "$BOOTSTRAP_VALIDATOR_MAX_STAKE_THRESHOLD" - fi - - echo "NUMBER_OF_CLIENT_NODES is : &NUMBER_OF_CLIENT_NODES" - if [[ $NUMBER_OF_CLIENT_NODES -gt 0 ]]; then - execution_step "Starting ${NUMBER_OF_CLIENT_NODES} client nodes" - "${REPO_ROOT}"/net/net.sh startclients "$maybeClientOptions" 
"$CLIENT_OPTIONS" - # It takes roughly 3 minutes from the time the client nodes return from starting to when they have finished loading the - # accounts file and actually start sending transactions - sleep 180 - fi - - if [[ -n "$WARMUP_SLOTS_BEFORE_TEST" ]]; then - # Allow the network to run for a bit before beginning the test - while [[ "$WARMUP_SLOTS_BEFORE_TEST" -gt $(get_slot) ]]; do - sleep 5 - done - fi - - # Stop the specified number of nodes - num_online_nodes=$(( NUMBER_OF_VALIDATOR_NODES + 1 )) - if [[ -n "$NUMBER_OF_OFFLINE_NODES" ]]; then - execution_step "Stopping $NUMBER_OF_OFFLINE_NODES nodes" - for (( i=NUMBER_OF_VALIDATOR_NODES; i>$(( NUMBER_OF_VALIDATOR_NODES - NUMBER_OF_OFFLINE_NODES )); i-- )); do - # shellcheck disable=SC2154 - "${REPO_ROOT}"/net/net.sh stopnode -i "${validatorIpList[$i]}" - done - num_online_nodes=$(( num_online_nodes - NUMBER_OF_OFFLINE_NODES )) - fi - - SECONDS=0 - START_SLOT=$(get_slot) - SLOT_COUNT_START_SECONDS=$SECONDS - execution_step "Marking beginning of slot rate test - Slot: $START_SLOT, Seconds: $SLOT_COUNT_START_SECONDS" - - case $TEST_TYPE in - fixed_duration) - execution_step "Wait ${TEST_DURATION_SECONDS} seconds to complete test" - sleep "$TEST_DURATION_SECONDS" - ;; - partition) - STATS_START_SECONDS=$SECONDS - execution_step "Wait $PARTITION_INACTIVE_DURATION before beginning to apply partitions" - sleep "$PARTITION_INACTIVE_DURATION" - for (( i=1; i<=PARTITION_ITERATION_COUNT; i++ )); do - execution_step "Partition Iteration $i of $PARTITION_ITERATION_COUNT" - execution_step "Applying netem config $NETEM_CONFIG_FILE for $PARTITION_ACTIVE_DURATION seconds" - "${REPO_ROOT}"/net/net.sh netem --config-file "$NETEM_CONFIG_FILE" -n $num_online_nodes - sleep "$PARTITION_ACTIVE_DURATION" - - execution_step "Resolving partitions for $PARTITION_INACTIVE_DURATION seconds" - "${REPO_ROOT}"/net/net.sh netem --config-file "$NETEM_CONFIG_FILE" --netem-cmd cleanup -n $num_online_nodes - sleep 
"$PARTITION_INACTIVE_DURATION" - done - STATS_FINISH_SECONDS=$SECONDS - TEST_DURATION_SECONDS=$((STATS_FINISH_SECONDS - STATS_START_SECONDS)) - ;; - script) - execution_step "Running custom script: ${REPO_ROOT}/${CUSTOM_SCRIPT}" - "$REPO_ROOT"/"$CUSTOM_SCRIPT" "$RESULT_FILE" - ;; - *) - echo "Error: Unsupported test type: $TEST_TYPE" - ;; - esac - - END_SLOT=$(get_slot) - SLOT_COUNT_END_SECONDS=$SECONDS - execution_step "Marking end of slot rate test - Slot: $END_SLOT, Seconds: $SLOT_COUNT_END_SECONDS" - - SLOTS_PER_SECOND="$(bc <<< "scale=3; ($END_SLOT - $START_SLOT)/($SLOT_COUNT_END_SECONDS - $SLOT_COUNT_START_SECONDS)")" - execution_step "Average slot rate: $SLOTS_PER_SECOND slots/second over $((SLOT_COUNT_END_SECONDS - SLOT_COUNT_START_SECONDS)) seconds" - - if [[ "$SKIP_PERF_RESULTS" = "false" ]]; then - declare -g dropped_vote_hash_count - - collect_performance_statistics - echo "slots_per_second: $SLOTS_PER_SECOND" >>"$RESULT_FILE" - - if [[ $dropped_vote_hash_count -gt 0 ]]; then - execution_step "Checking for dropped vote hash count" - exit 1 - fi - fi - - RESULT_DETAILS=$(<"$RESULT_FILE") - upload-ci-artifact "$RESULT_FILE" -} - -# shellcheck disable=SC2034 -RESULT_DETAILS= -STEP= -execution_step "Initialize Environment" - -[[ -n $TESTNET_TAG ]] || TESTNET_TAG=${CLOUD_PROVIDER}-testnet-automation -[[ -n $INFLUX_HOST ]] || INFLUX_HOST=https://internal-metrics.solana.com:8086 -[[ -n $BOOTSTRAP_VALIDATOR_MAX_STAKE_THRESHOLD ]] || BOOTSTRAP_VALIDATOR_MAX_STAKE_THRESHOLD=66 -[[ -n $SKIP_PERF_RESULTS ]] || SKIP_PERF_RESULTS=false - -if [[ -z $NUMBER_OF_VALIDATOR_NODES ]]; then - echo NUMBER_OF_VALIDATOR_NODES not defined - exit 1 -fi - -startGpuMode="off" -if [[ -z $ENABLE_GPU ]]; then - ENABLE_GPU=false -fi -if [[ "$ENABLE_GPU" = "true" ]]; then - maybeEnableGpu="--enable-gpu" - startGpuMode="on" -fi - -if [[ -z $NUMBER_OF_CLIENT_NODES ]]; then - echo NUMBER_OF_CLIENT_NODES not defined - exit 1 -fi - -if [[ -z $SOLANA_METRICS_CONFIG ]]; then - if [[ -z 
$SOLANA_METRICS_PARTIAL_CONFIG ]]; then - echo SOLANA_METRICS_PARTIAL_CONFIG not defined - exit 1 - fi - export SOLANA_METRICS_CONFIG="db=$TESTNET_TAG,host=$INFLUX_HOST,$SOLANA_METRICS_PARTIAL_CONFIG" -fi -echo "SOLANA_METRICS_CONFIG: $SOLANA_METRICS_CONFIG" - -if [[ -z $ALLOW_BOOT_FAILURES ]]; then - ALLOW_BOOT_FAILURES=false -fi -if [[ "$ALLOW_BOOT_FAILURES" = "true" ]]; then - maybeCreateAllowBootFailures="--allow-boot-failures" - maybeStartAllowBootFailures="-F" -fi - -if [[ -z $USE_PUBLIC_IP_ADDRESSES ]]; then - USE_PUBLIC_IP_ADDRESSES=false -fi -if [[ "$USE_PUBLIC_IP_ADDRESSES" = "true" ]]; then - maybePublicIpAddresses="-P" -fi - -execution_step "Checking for required parameters" -testTypeRequiredParameters= -case $TEST_TYPE in - fixed_duration) - testTypeRequiredParameters=( - TEST_DURATION_SECONDS \ - ) - ;; - partition) - testTypeRequiredParameters=( - NETEM_CONFIG_FILE \ - PARTITION_ACTIVE_DURATION \ - PARTITION_INACTIVE_DURATION \ - PARTITION_ITERATION_COUNT \ - ) - ;; - script) - testTypeRequiredParameters=( - CUSTOM_SCRIPT \ - ) - ;; - *) - echo "Error: Unsupported test type: $TEST_TYPE" - ;; -esac - -missingParameters= -for i in "${testTypeRequiredParameters[@]}"; do - if [[ -z ${!i} ]]; then - missingParameters+="${i}, " - fi -done - -if [[ -n $missingParameters ]]; then - echo "Error: For test type $TEST_TYPE, the following required parameters are missing: ${missingParameters[*]}" - exit 1 -fi - -maybeClientOptions=${CLIENT_OPTIONS:+"-c"} -maybeCustomMachineType=${VALIDATOR_NODE_MACHINE_TYPE:+"--custom-machine-type"} - -IFS=, read -r -a TESTNET_CLOUD_ZONES <<<"${TESTNET_ZONES}" - -RESULT_FILE="$TESTNET_TAG"_SUMMARY_STATS_"$NUMBER_OF_VALIDATOR_NODES".log -rm -f "$RESULT_FILE" - -TEST_PARAMS_TO_DISPLAY=(CLOUD_PROVIDER \ - NUMBER_OF_VALIDATOR_NODES \ - ENABLE_GPU \ - VALIDATOR_NODE_MACHINE_TYPE \ - NUMBER_OF_CLIENT_NODES \ - CLIENT_OPTIONS \ - CLIENT_DELAY_START \ - TESTNET_ZONES \ - TEST_DURATION_SECONDS \ - USE_PUBLIC_IP_ADDRESSES \ - 
ALLOW_BOOT_FAILURES \ - ADDITIONAL_FLAGS \ - APPLY_PARTITIONS \ - NETEM_CONFIG_FILE \ - WAIT_FOR_EQUAL_STAKE \ - WARMUP_SLOTS_BEFORE_TEST \ - NUMBER_OF_OFFLINE_NODES \ - PARTITION_ACTIVE_DURATION \ - PARTITION_INACTIVE_DURATION \ - PARTITION_ITERATION_COUNT \ - TEST_TYPE \ - CUSTOM_SCRIPT \ - ) - -TEST_CONFIGURATION= -for i in "${TEST_PARAMS_TO_DISPLAY[@]}"; do - if [[ -n ${!i} ]]; then - TEST_CONFIGURATION+="${i} = ${!i} | " - fi -done - -# shellcheck disable=SC2034 -TESTNET_START_UNIX_MSECS="$(($(date +%s%N)/1000000))" - -launch_testnet diff --git a/test-abi.sh b/test-abi.sh deleted file mode 100755 index 27cfb454336908..00000000000000 --- a/test-abi.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -# -# Easily run the ABI tests for the entire repo or a subset -# - -here=$(dirname "$0") -set -x -exec "${here}/cargo" nightly test --features frozen-abi --lib -- test_abi_ --nocapture diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml index 5d3ce69575456f..c3015641f9518d 100644 --- a/test-validator/Cargo.toml +++ b/test-validator/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "solana-test-validator" -description = "Blockchain, Rebuilt for Scale" readme = "../README.md" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -38,10 +38,11 @@ solana-instruction = { workspace = true } solana-keypair = { workspace = true } solana-ledger = { workspace = true } solana-loader-v3-interface = { workspace = true } -solana-logger = "=2.3.1" +solana-logger = "=3.0.0" solana-message = { workspace = true } solana-native-token = { workspace = true } solana-net-utils = { workspace = true } +solana-program-binaries = { workspace = true } solana-program-test = { workspace = true } solana-pubkey = { workspace = true } solana-rent = { workspace = true } diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 
1f98034c7e4202..fc8cf7a7a13ac4 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -1,6 +1,6 @@ #![allow(clippy::arithmetic_side_effects)] use { - agave_feature_set::{raise_cpi_nesting_limit_to_8, FeatureSet, FEATURE_NAMES}, + agave_feature_set::{alpenglow, raise_cpi_nesting_limit_to_8, FeatureSet, FEATURE_NAMES}, base64::{prelude::BASE64_STANDARD, Engine}, crossbeam_channel::Receiver, log::*, @@ -25,7 +25,7 @@ use { geyser_plugin_manager::GeyserPluginManager, GeyserPluginManagerRequest, }, solana_gossip::{ - cluster_info::{BindIpAddrs, ClusterInfo, NodeConfig}, + cluster_info::{ClusterInfo, NodeConfig}, contact_info::Protocol, node::Node, }, @@ -38,8 +38,8 @@ use { }, solana_loader_v3_interface::state::UpgradeableLoaderState, solana_message::Message, - solana_native_token::sol_to_lamports, - solana_net_utils::{find_available_ports_in_range, PortRange}, + solana_native_token::LAMPORTS_PER_SOL, + solana_net_utils::{find_available_ports_in_range, multihomed_sockets::BindIpAddrs, PortRange}, solana_pubkey::Pubkey, solana_rent::Rent, solana_rpc::{rpc::JsonRpcConfig, rpc_pubsub_service::PubSubConfig}, @@ -50,7 +50,7 @@ use { genesis_utils::{self, create_genesis_config_with_leader_ex_no_features}, runtime_config::RuntimeConfig, snapshot_config::SnapshotConfig, - snapshot_utils::SnapshotInterval, + snapshot_utils::{SnapshotInterval, BANK_SNAPSHOTS_DIR}, }, solana_sdk_ids::address_lookup_table, solana_signer::Signer, @@ -143,6 +143,8 @@ pub struct TestValidatorGenesis { impl Default for TestValidatorGenesis { fn default() -> Self { + // Default to Tower consensus to ensure proper coverage pre-Alpenglow. 
+ let deactivate_feature_set = [alpenglow::id()].into_iter().collect(); Self { fee_rate_governor: FeeRateGovernor::default(), ledger_path: Option::::default(), @@ -165,12 +167,12 @@ impl Default for TestValidatorGenesis { max_ledger_shreds: Option::::default(), max_genesis_archive_unpacked_size: Option::::default(), geyser_plugin_config_files: Option::>::default(), - deactivate_feature_set: HashSet::::default(), + deactivate_feature_set, compute_unit_limit: Option::::default(), log_messages_bytes_limit: Option::::default(), transaction_account_lock_limit: Option::::default(), tpu_enable_udp: DEFAULT_TPU_ENABLE_UDP, - geyser_plugin_manager: Arc::new(RwLock::new(GeyserPluginManager::new())), + geyser_plugin_manager: Arc::new(RwLock::new(GeyserPluginManager::default())), admin_rpc_service_post_init: Arc::>>::default(), } @@ -345,7 +347,7 @@ impl TestValidatorGenesis { { let addresses: Vec = addresses.into_iter().collect(); for chunk in addresses.chunks(MAX_MULTIPLE_ACCOUNTS) { - info!("Fetching {:?} over RPC...", chunk); + info!("Fetching {chunk:?} over RPC..."); let responses = rpc_client .get_multiple_accounts(chunk) .map_err(|err| format!("Failed to fetch: {err}"))?; @@ -353,7 +355,7 @@ impl TestValidatorGenesis { if let Some(account) = res { self.add_account(*address, transform(address, account)?); } else if skip_missing { - warn!("Could not find {}, skipping.", address); + warn!("Could not find {address}, skipping."); } else { return Err(format!("Failed to fetch {address}")); } @@ -397,7 +399,7 @@ impl TestValidatorGenesis { let mut alt_entries: Vec = Vec::new(); for chunk in addresses.chunks(MAX_MULTIPLE_ACCOUNTS) { - info!("Fetching {:?} over RPC...", chunk); + info!("Fetching {chunk:?} over RPC..."); let responses = rpc_client .get_multiple_accounts(chunk) .map_err(|err| format!("Failed to fetch: {err}"))?; @@ -569,7 +571,7 @@ impl TestValidatorGenesis { json_files.extend(matched_files); } - debug!("account files found: {:?}", json_files); + debug!("account 
files found: {json_files:?}"); let accounts: Vec<_> = json_files .iter() @@ -842,15 +844,6 @@ impl TestValidator { .expect("validator start failed") } - /// allow tests to indicate that validator has completed initialization - pub fn set_startup_verification_complete_for_tests(&self) { - self.bank_forks() - .read() - .unwrap() - .root_bank() - .set_initial_accounts_hash_verification_completed(); - } - /// Initialize the ledger directory /// /// If `ledger_path` is `None`, a temporary ledger will be created. Otherwise the ledger will @@ -864,29 +857,26 @@ impl TestValidator { let validator_identity = Keypair::new(); let validator_vote_account = Keypair::new(); let validator_stake_account = Keypair::new(); - let validator_identity_lamports = sol_to_lamports(500.); - let validator_stake_lamports = sol_to_lamports(1_000_000.); - let mint_lamports = sol_to_lamports(500_000_000.); + let validator_identity_lamports = 500 * LAMPORTS_PER_SOL; + let validator_stake_lamports = 1_000_000 * LAMPORTS_PER_SOL; + let mint_lamports = 500_000_000 * LAMPORTS_PER_SOL; // Only activate features which are not explicitly deactivated. 
let mut feature_set = FeatureSet::default().inactive().clone(); for feature in &config.deactivate_feature_set { if feature_set.remove(feature) { - info!("Feature for {:?} deactivated", feature) + info!("Feature for {feature:?} deactivated") } else { - warn!( - "Feature {:?} set for deactivation is not a known Feature public key", - feature, - ) + warn!("Feature {feature:?} set for deactivation is not a known Feature public key",) } } let mut accounts = config.accounts.clone(); - for (address, account) in solana_program_test::programs::spl_programs(&config.rent) { + for (address, account) in solana_program_binaries::spl_programs(&config.rent) { accounts.entry(address).or_insert(account); } for (address, account) in - solana_program_test::programs::core_bpf_programs(&config.rent, |feature_id| { + solana_program_binaries::core_bpf_programs(&config.rent, |feature_id| { feature_set.contains(feature_id) }) { @@ -1036,7 +1026,7 @@ impl TestValidator { let node = { let bind_ip_addr = config.node_config.bind_ip_addr; let validator_node_config = NodeConfig { - bind_ip_addrs: BindIpAddrs::new(vec![bind_ip_addr])?, + bind_ip_addrs: Arc::new(BindIpAddrs::new(vec![bind_ip_addr])?), gossip_port: config.node_config.gossip_addr.port(), port_range: config.node_config.port_range, advertised_ip: bind_ip_addr, @@ -1079,11 +1069,11 @@ impl TestValidator { } } - let accounts_db_config = Some(AccountsDbConfig { + let accounts_db_config = AccountsDbConfig { index: Some(AccountsIndexConfig::default()), account_indexes: Some(config.rpc_config.account_indexes.clone()), ..AccountsDbConfig::default() - }); + }; let runtime_config = RuntimeConfig { compute_budget: config @@ -1125,7 +1115,7 @@ impl TestValidator { NonZeroU64::new(100).unwrap(), ), incremental_snapshot_archive_interval: SnapshotInterval::Disabled, - bank_snapshots_dir: ledger_path.join("snapshot"), + bank_snapshots_dir: ledger_path.join(BANK_SNAPSHOTS_DIR), full_snapshot_archives_dir: ledger_path.to_path_buf(), 
incremental_snapshot_archives_dir: ledger_path.to_path_buf(), ..SnapshotConfig::default() @@ -1206,13 +1196,13 @@ impl TestValidator { } } Err(err) => { - warn!("get_fee_for_message() failed: {:?}", err); + warn!("get_fee_for_message() failed: {err:?}"); break; } } } Err(err) => { - warn!("get_latest_blockhash() failed: {:?}", err); + warn!("get_latest_blockhash() failed: {err:?}"); break; } } @@ -1255,13 +1245,13 @@ impl TestValidator { match rpc_client.send_transaction(&transaction).await { Ok(_) => *is_deployed = true, Err(e) => { - if format!("{:?}", e).contains("Program is not deployed") { - debug!("{:?} - not deployed", program_id); + if format!("{e:?}").contains("Program is not deployed") { + debug!("{program_id:?} - not deployed"); } else { // Assuming all other other errors could only occur *after* // program is deployed for usability. *is_deployed = true; - debug!("{:?} - Unexpected error: {:?}", program_id, e); + debug!("{program_id:?} - Unexpected error: {e:?}"); } } } @@ -1270,7 +1260,7 @@ impl TestValidator { return; } - println!("Waiting for programs to be fully deployed {} ...", attempt); + println!("Waiting for programs to be fully deployed {attempt} ..."); sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT)).await; } panic!("Timeout waiting for program to become usable"); @@ -1357,7 +1347,6 @@ mod test { #[test] fn get_health() { let (test_validator, _payer) = TestValidatorGenesis::default().start(); - test_validator.set_startup_verification_complete_for_tests(); let rpc_client = test_validator.get_rpc_client(); rpc_client.get_health().expect("health"); } @@ -1365,7 +1354,6 @@ mod test { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn nonblocking_get_health() { let (test_validator, _payer) = TestValidatorGenesis::default().start_async().await; - test_validator.set_startup_verification_complete_for_tests(); let rpc_client = test_validator.get_async_rpc_client(); rpc_client.get_health().await.expect("health"); } @@ -1436,6 +1424,7 
@@ mod test { [ agave_feature_set::deprecate_rewards_sysvar::id(), agave_feature_set::disable_fees_sysvar::id(), + alpenglow::id(), ] .into_iter() .for_each(|feature| { @@ -1513,13 +1502,7 @@ mod test { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_core_bpf_programs() { - let (test_validator, _payer) = TestValidatorGenesis::default() - .deactivate_features(&[ - // Don't migrate the stake program. - agave_feature_set::migrate_stake_program_to_core_bpf::id(), - ]) - .start_async() - .await; + let (test_validator, _payer) = TestValidatorGenesis::default().start_async().await; let rpc_client = test_validator.get_async_rpc_client(); @@ -1548,9 +1531,9 @@ mod test { assert_eq!(account.owner, solana_sdk_ids::bpf_loader_upgradeable::id()); assert!(account.executable); - // Stake is a builtin. + // Stake is a BPF program. let account = fetched_programs[3].as_ref().unwrap(); - assert_eq!(account.owner, solana_sdk_ids::native_loader::id()); + assert_eq!(account.owner, solana_sdk_ids::bpf_loader_upgradeable::id()); assert!(account.executable); } } diff --git a/thread-manager/examples/core_contention_basics.rs b/thread-manager/examples/core_contention_basics.rs index 81569538f2c5eb..c2b20390cc901e 100644 --- a/thread-manager/examples/core_contention_basics.rs +++ b/thread-manager/examples/core_contention_basics.rs @@ -58,7 +58,7 @@ fn main() -> anyhow::Result<()> { join_handle.join().expect("Load generator crashed!") }); //print out the results of the bench run - info!("Results are: {:?}", results); + info!("Results are: {results:?}"); } Ok(()) } diff --git a/thread-manager/examples/core_contention_sweep.rs b/thread-manager/examples/core_contention_sweep.rs index db3ee496f5b137..ff7e08d28c2e5f 100644 --- a/thread-manager/examples/core_contention_sweep.rs +++ b/thread-manager/examples/core_contention_sweep.rs @@ -122,7 +122,7 @@ fn main() -> anyhow::Result<()> { }; jh.join().expect("Some of the threads crashed!") })?; - info!("Results are: {:?}", 
measurement); + info!("Results are: {measurement:?}"); results.latencies_s.push(measurement.latency_s); results .requests_per_second diff --git a/thread-manager/src/lib.rs b/thread-manager/src/lib.rs index 1b0fd7213864ff..3b120d7b4b7588 100644 --- a/thread-manager/src/lib.rs +++ b/thread-manager/src/lib.rs @@ -227,7 +227,7 @@ mod tests { #[cfg(target_os = "linux")] fn validate_affinity(expect_cores: &[usize], error_msg: &str) { let affinity = affinity::get_thread_affinity().unwrap(); - assert_eq!(affinity, expect_cores, "{}", error_msg); + assert_eq!(affinity, expect_cores, "{error_msg}"); } #[test] #[cfg(target_os = "linux")] diff --git a/thread-manager/src/policy.rs b/thread-manager/src/policy.rs index e8991ce1f6e230..06934a71951e3a 100644 --- a/thread-manager/src/policy.rs +++ b/thread-manager/src/policy.rs @@ -61,7 +61,7 @@ cfg_if::cfg_if! { policy, thread_priority::ThreadPriority::Crossplatform((priority).try_into().expect("Priority value outside of OS-supported range")), ) { - panic!("Can not set thread priority, OS error {:?}", e); + panic!("Can not set thread priority, OS error {e:?}"); } } pub fn parse_policy(policy: &str) -> ThreadSchedulePolicy { diff --git a/tls-utils/src/tls_certificates.rs b/tls-utils/src/tls_certificates.rs index 76faf995e1c025..4963003e40653f 100644 --- a/tls-utils/src/tls_certificates.rs +++ b/tls-utils/src/tls_certificates.rs @@ -32,7 +32,7 @@ pub fn new_dummy_x509_certificate( ]; let key_pkcs8_der = { - let keypair_secret_bytes = keypair.secret().as_bytes(); + let keypair_secret_bytes = keypair.secret_bytes(); let keypair_secret_len = keypair_secret_bytes.len(); if keypair_secret_len != 32 { panic!("Unexpected secret key length!"); diff --git a/tokens/Cargo.toml b/tokens/Cargo.toml index 02cb6e6c3fb847..f37e2ade3a9872 100644 --- a/tokens/Cargo.toml +++ b/tokens/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "solana-tokens" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-tokens" version = { 
workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -23,6 +23,7 @@ serde_derive = { workspace = true } solana-account-decoder = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } +solana-cli-output = { workspace = true } solana-clock = { workspace = true } solana-commitment-config = { workspace = true } solana-hash = { workspace = true } @@ -43,7 +44,7 @@ solana-system-interface = { workspace = true } solana-transaction = { workspace = true } solana-transaction-status = { workspace = true } solana-version = { workspace = true } -spl-associated-token-account-interface = { version = "=1.0.0" } +spl-associated-token-account-interface = { version = "=2.0.0" } spl-token-interface = { workspace = true } tempfile = { workspace = true } thiserror = { workspace = true } diff --git a/tokens/src/arg_parser.rs b/tokens/src/arg_parser.rs index 7542bb090b4d45..f4f2a21b0dacb9 100644 --- a/tokens/src/arg_parser.rs +++ b/tokens/src/arg_parser.rs @@ -12,7 +12,7 @@ use { keypair::{pubkey_from_path, signer_from_path}, }, solana_cli_config::CONFIG_FILE, - solana_native_token::sol_to_lamports, + solana_native_token::sol_str_to_lamports, solana_remote_wallet::remote_wallet::maybe_wallet_manager, std::{error::Error, ffi::OsString, process::exit}, }; @@ -44,8 +44,8 @@ where .global(true) .validator(is_url_or_moniker) .help( - "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]", + "URL for Solana's JSON RPC or moniker (or their first letter): [mainnet-beta, \ + testnet, devnet, localhost]", ), ) .subcommand( @@ -58,9 +58,9 @@ where .takes_value(true) .value_name("FILE") .help( - "Location for storing distribution database. 
\ - The database is used for tracking transactions as they are finalized \ - and preventing double spends.", + "Location for storing distribution database. The database is used for \ + tracking transactions as they are finalized and preventing double \ + spends.", ), ) .arg( @@ -121,9 +121,9 @@ where .takes_value(true) .value_name("FILE") .help( - "Location for storing distribution database. \ - The database is used for tracking transactions as they are finalized \ - and preventing double spends.", + "Location for storing distribution database. The database is used for \ + tracking transactions as they are finalized and preventing double \ + spends.", ), ) .arg( @@ -192,9 +192,9 @@ where .takes_value(true) .value_name("FILE") .help( - "Location for storing distribution database. \ - The database is used for tracking transactions as they are finalized \ - and preventing double spends.", + "Location for storing distribution database. The database is used for \ + tracking transactions as they are finalized and preventing double \ + spends.", ), ) .arg( @@ -290,9 +290,9 @@ where .takes_value(true) .value_name("FILE") .help( - "Location for storing distribution database. \ - The database is used for tracking transactions as they are finalized \ - and preventing double spends.", + "Location for storing distribution database. 
The database is used for \ + tracking transactions as they are finalized and preventing double \ + spends.", ), ) .arg( @@ -439,7 +439,9 @@ fn parse_distribute_tokens_args( fee_payer, stake_args: None, spl_token_args: None, - transfer_amount: value_of(matches, "transfer_amount").map(sol_to_lamports), + transfer_amount: matches + .value_of("transfer_amount") + .and_then(sol_str_to_lamports), }) } @@ -478,7 +480,10 @@ fn parse_create_stake_args( .transpose()?; let stake_args = StakeArgs { - unlocked_sol: sol_to_lamports(value_t_or_exit!(matches, "unlocked_sol", f64)), + unlocked_sol: matches + .value_of("unlocked_sol") + .and_then(sol_str_to_lamports) + .unwrap(), lockup_authority, sender_stake_args: None, }; @@ -562,7 +567,10 @@ fn parse_distribute_stake_args( rent_exempt_reserve: None, }; let stake_args = StakeArgs { - unlocked_sol: sol_to_lamports(value_t_or_exit!(matches, "unlocked_sol", f64)), + unlocked_sol: matches + .value_of("unlocked_sol") + .and_then(sol_str_to_lamports) + .unwrap(), lockup_authority: lockup_authority_address, sender_stake_args: Some(sender_stake_args), }; diff --git a/tokens/src/commands.rs b/tokens/src/commands.rs index 13ac1711b24c50..6cc8cab90db312 100644 --- a/tokens/src/commands.rs +++ b/tokens/src/commands.rs @@ -15,12 +15,13 @@ use { pickledb::PickleDb, serde::{Deserialize, Serialize}, solana_account_decoder::parse_token::real_number_string, + solana_cli_output::display::build_balance_message, solana_clock::Slot, solana_commitment_config::CommitmentConfig, solana_hash::Hash, solana_instruction::Instruction, solana_message::Message, - solana_native_token::{lamports_to_sol, sol_to_lamports}, + solana_native_token::sol_str_to_lamports, solana_program_error::ProgramError, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::{ @@ -138,6 +139,8 @@ pub enum Error { ProgramError(#[from] ProgramError), #[error("Exit signal received")] ExitSignal, + #[error("Bad input data for SOL value: {input}")] + BadInputNumberError { input: 
String }, } fn merge_allocations(allocations: &[TypedAllocation]) -> Vec { @@ -374,7 +377,7 @@ fn build_messages( println!( "{:<44} {:>24.9}", allocation.recipient, - lamports_to_sol(allocation.amount) + build_balance_message(allocation.amount, false, false) ); false }; @@ -531,7 +534,7 @@ fn read_allocations( // We only support SOL token in "require lockup" mode. rdr.deserialize() .map(|recipient| { - let (recipient, amount, lockup_date): (String, f64, String) = recipient?; + let (recipient, amount, lockup_date): (String, String, String) = recipient?; let recipient = Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { input: recipient, @@ -551,7 +554,8 @@ fn read_allocations( }; Ok(TypedAllocation { recipient, - amount: sol_to_lamports(amount), + amount: sol_str_to_lamports(&amount) + .ok_or(Error::BadInputNumberError { input: amount })?, lockup_date, }) }) @@ -575,7 +579,7 @@ fn read_allocations( } else { rdr.deserialize() .map(|recipient| { - let (recipient, amount): (String, f64) = recipient?; + let (recipient, amount): (String, String) = recipient?; let recipient = Pubkey::from_str(&recipient).map_err(|err| Error::BadInputPubkeyError { input: recipient, @@ -583,7 +587,8 @@ fn read_allocations( })?; Ok(TypedAllocation { recipient, - amount: sol_to_lamports(amount), + amount: sol_str_to_lamports(&amount) + .ok_or(Error::BadInputNumberError { input: amount })?, lockup_date: None, }) }) @@ -847,28 +852,28 @@ fn check_payer_balances( if staker_balance < undistributed_tokens { return Err(Error::InsufficientFunds( vec![FundingSource::StakeAccount].into(), - lamports_to_sol(undistributed_tokens).to_string(), + build_balance_message(undistributed_tokens, false, false).to_string(), )); } if args.fee_payer.pubkey() == unlocked_sol_source { if fee_payer_balance < fees + total_unlocked_sol { return Err(Error::InsufficientFunds( vec![FundingSource::SystemAccount, FundingSource::FeePayer].into(), - lamports_to_sol(fees + total_unlocked_sol).to_string(), + 
build_balance_message(fees + total_unlocked_sol, false, false).to_string(), )); } } else { if fee_payer_balance < fees { return Err(Error::InsufficientFunds( vec![FundingSource::FeePayer].into(), - lamports_to_sol(fees).to_string(), + build_balance_message(fees, false, false).to_string(), )); } let unlocked_sol_balance = client.get_balance(&unlocked_sol_source)?; if unlocked_sol_balance < total_unlocked_sol { return Err(Error::InsufficientFunds( vec![FundingSource::SystemAccount].into(), - lamports_to_sol(total_unlocked_sol).to_string(), + build_balance_message(total_unlocked_sol, false, false).to_string(), )); } } @@ -876,21 +881,21 @@ fn check_payer_balances( if fee_payer_balance < fees + undistributed_tokens { return Err(Error::InsufficientFunds( vec![FundingSource::SystemAccount, FundingSource::FeePayer].into(), - lamports_to_sol(fees + undistributed_tokens).to_string(), + build_balance_message(fees + undistributed_tokens, false, false).to_string(), )); } } else { if fee_payer_balance < fees { return Err(Error::InsufficientFunds( vec![FundingSource::FeePayer].into(), - lamports_to_sol(fees).to_string(), + build_balance_message(fees, false, false).to_string(), )); } let sender_balance = client.get_balance(&distribution_source)?; if sender_balance < undistributed_tokens { return Err(Error::InsufficientFunds( vec![FundingSource::SystemAccount].into(), - lamports_to_sol(undistributed_tokens).to_string(), + build_balance_message(undistributed_tokens, false, false).to_string(), )); } } @@ -931,14 +936,13 @@ pub fn process_balances( print_token_balances(client, allocation, spl_token_args)?; } else { let address: Pubkey = allocation.recipient; - let expected = lamports_to_sol(allocation.amount); - let actual = lamports_to_sol(client.get_balance(&address).unwrap()); + let expected = build_balance_message(allocation.amount, false, false); + let actual_amount = client.get_balance(&address).unwrap(); + let actual = build_balance_message(actual_amount, false, false); + let 
diff = build_balance_message(actual_amount - allocation.amount, false, false); println!( "{:<44} {:>24.9} {:>24.9} {:>24.9}", - allocation.recipient, - expected, - actual, - actual - expected, + allocation.recipient, expected, actual, diff, ); } } @@ -968,7 +972,7 @@ pub fn test_process_distribute_tokens_with_client( let fee_payer = Keypair::new(); let transaction = transfer( client, - sol_to_lamports(1.0), + sol_str_to_lamports("1.0").unwrap(), &sender_keypair, &fee_payer.pubkey(), ) @@ -978,13 +982,13 @@ pub fn test_process_distribute_tokens_with_client( .unwrap(); assert_eq!( client.get_balance(&fee_payer.pubkey()).unwrap(), - sol_to_lamports(1.0), + sol_str_to_lamports("1.0").unwrap(), ); let expected_amount = if let Some(amount) = transfer_amount { amount } else { - sol_to_lamports(1000.0) + sol_str_to_lamports("1000.0").unwrap() }; let alice_pubkey = pubkey::new_rand(); let allocations_file = NamedTempFile::new().unwrap(); @@ -993,7 +997,7 @@ pub fn test_process_distribute_tokens_with_client( wtr.write_record(["recipient", "amount"]).unwrap(); wtr.write_record([ alice_pubkey.to_string(), - lamports_to_sol(expected_amount).to_string(), + build_balance_message(expected_amount, false, false).to_string(), ]) .unwrap(); wtr.flush().unwrap(); @@ -1051,7 +1055,7 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair: let fee_payer = Keypair::new(); let transaction = transfer( client, - sol_to_lamports(1.0), + sol_str_to_lamports("1.0").unwrap(), &sender_keypair, &fee_payer.pubkey(), ) @@ -1075,7 +1079,7 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair: &stake_account_address, &authorized, &lockup, - sol_to_lamports(3000.0), + sol_str_to_lamports("3000.0").unwrap(), ); let message = Message::new(&instructions, Some(&sender_keypair.pubkey())); let signers = [&sender_keypair, &stake_account_keypair]; @@ -1085,7 +1089,7 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair: 
.send_and_confirm_transaction_with_spinner(&transaction) .unwrap(); - let expected_amount = sol_to_lamports(1000.0); + let expected_amount = sol_str_to_lamports("1000.0").unwrap(); let alice_pubkey = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); @@ -1094,7 +1098,7 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair: .unwrap(); wtr.write_record([ alice_pubkey.to_string(), - lamports_to_sol(expected_amount).to_string(), + build_balance_message(expected_amount, false, false).to_string(), "".to_string(), ]) .unwrap(); @@ -1113,7 +1117,7 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair: let stake_args = StakeArgs { lockup_authority: None, - unlocked_sol: sol_to_lamports(1.0), + unlocked_sol: sol_str_to_lamports("1.0").unwrap(), sender_stake_args: None, }; let args = DistributeTokensArgs { @@ -1138,12 +1142,12 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair: assert_eq!( client.get_balance(&alice_pubkey).unwrap(), - sol_to_lamports(1.0), + sol_str_to_lamports("1.0").unwrap(), ); let new_stake_account_address = transaction_infos[0].new_stake_account_address.unwrap(); assert_eq!( client.get_balance(&new_stake_account_address).unwrap(), - expected_amount - sol_to_lamports(1.0), + expected_amount - sol_str_to_lamports("1.0").unwrap(), ); check_output_file(&output_path, &db::open_db(&transaction_db, true).unwrap()); @@ -1158,11 +1162,11 @@ pub fn test_process_create_stake_with_client(client: &RpcClient, sender_keypair: assert_eq!( client.get_balance(&alice_pubkey).unwrap(), - sol_to_lamports(1.0), + sol_str_to_lamports("1.0").unwrap(), ); assert_eq!( client.get_balance(&new_stake_account_address).unwrap(), - expected_amount - sol_to_lamports(1.0), + expected_amount - sol_str_to_lamports("1.0").unwrap(), ); check_output_file(&output_path, &db::open_db(&transaction_db, true).unwrap()); @@ -1173,7 +1177,7 @@ 
pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp let fee_payer = Keypair::new(); let transaction = transfer( client, - sol_to_lamports(1.0), + sol_str_to_lamports("1.0").unwrap(), &sender_keypair, &fee_payer.pubkey(), ) @@ -1197,7 +1201,7 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp &stake_account_address, &authorized, &lockup, - sol_to_lamports(3000.0), + sol_str_to_lamports("3000.0").unwrap(), ); let message = Message::new(&instructions, Some(&sender_keypair.pubkey())); let signers = [&sender_keypair, &stake_account_keypair]; @@ -1207,7 +1211,7 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp .send_and_confirm_transaction_with_spinner(&transaction) .unwrap(); - let expected_amount = sol_to_lamports(1000.0); + let expected_amount = sol_str_to_lamports("1000.0").unwrap(); let alice_pubkey = pubkey::new_rand(); let file = NamedTempFile::new().unwrap(); let input_csv = file.path().to_str().unwrap().to_string(); @@ -1216,7 +1220,7 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp .unwrap(); wtr.write_record([ alice_pubkey.to_string(), - lamports_to_sol(expected_amount).to_string(), + build_balance_message(expected_amount, false, false).to_string(), "".to_string(), ]) .unwrap(); @@ -1244,7 +1248,7 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp rent_exempt_reserve: Some(rent_exempt_reserve), }; let stake_args = StakeArgs { - unlocked_sol: sol_to_lamports(1.0), + unlocked_sol: sol_str_to_lamports("1.0").unwrap(), lockup_authority: None, sender_stake_args: Some(sender_stake_args), }; @@ -1270,12 +1274,12 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp assert_eq!( client.get_balance(&alice_pubkey).unwrap(), - sol_to_lamports(1.0), + sol_str_to_lamports("1.0").unwrap(), ); let new_stake_account_address = transaction_infos[0].new_stake_account_address.unwrap(); 
assert_eq!( client.get_balance(&new_stake_account_address).unwrap(), - expected_amount - sol_to_lamports(1.0), + expected_amount - sol_str_to_lamports("1.0").unwrap(), ); check_output_file(&output_path, &db::open_db(&transaction_db, true).unwrap()); @@ -1290,11 +1294,11 @@ pub fn test_process_distribute_stake_with_client(client: &RpcClient, sender_keyp assert_eq!( client.get_balance(&alice_pubkey).unwrap(), - sol_to_lamports(1.0), + sol_str_to_lamports("1.0").unwrap(), ); assert_eq!( client.get_balance(&new_stake_account_address).unwrap(), - expected_amount - sol_to_lamports(1.0), + expected_amount - sol_str_to_lamports("1.0").unwrap(), ); check_output_file(&output_path, &db::open_db(&transaction_db, true).unwrap()); @@ -1306,6 +1310,7 @@ mod tests { super::*, solana_instruction::AccountMeta, solana_keypair::{read_keypair_file, write_keypair_file}, + solana_native_token::LAMPORTS_PER_SOL, solana_signer::Signer, solana_stake_interface::instruction::StakeInstruction, solana_streamer::socket::SocketAddrSpace, @@ -1342,14 +1347,11 @@ mod tests { let url = test_validator.rpc_url(); let client = RpcClient::new_with_commitment(url, CommitmentConfig::processed()); - test_process_distribute_tokens_with_client(&client, alice, Some(sol_to_lamports(1.5))); + test_process_distribute_tokens_with_client(&client, alice, sol_str_to_lamports("1.5")); } fn simple_test_validator_no_fees(pubkey: Pubkey) -> TestValidator { - let test_validator = - TestValidator::with_no_fees(pubkey, None, SocketAddrSpace::Unspecified); - test_validator.set_startup_verification_complete_for_tests(); - test_validator + TestValidator::with_no_fees(pubkey, None, SocketAddrSpace::Unspecified) } #[test] @@ -1404,7 +1406,7 @@ mod tests { let allocation_sol = TypedAllocation { recipient: alice_pubkey, - amount: sol_to_lamports(42.0), + amount: sol_str_to_lamports("42.0").unwrap(), lockup_date: None, }; @@ -1438,12 +1440,12 @@ mod tests { let expected_allocations = vec![ TypedAllocation { recipient: pubkey0, - 
amount: sol_to_lamports(42.0), + amount: sol_str_to_lamports("42.0").unwrap(), lockup_date: None, }, TypedAllocation { recipient: pubkey1, - amount: sol_to_lamports(43.0), + amount: sol_str_to_lamports("43.0").unwrap(), lockup_date: None, }, ]; @@ -1596,7 +1598,7 @@ mod tests { ); let input_csv = file.path().to_str().unwrap().to_string(); let got = read_allocations(&input_csv, None, false, false); - assert!(matches!(got, Err(Error::CsvError(..)))); + assert!(matches!(got, Err(Error::BadInputNumberError { .. }))); // Bad value in 2nd column (with require lockup). let file = NamedTempFile::new().unwrap(); generate_csv_file( @@ -1613,7 +1615,7 @@ mod tests { ); let input_csv = file.path().to_str().unwrap().to_string(); let got = read_allocations(&input_csv, None, true, false); - assert!(matches!(got, Err(Error::CsvError(..)))); + assert!(matches!(got, Err(Error::BadInputNumberError { .. }))); // Bad value in 2nd column (with raw amount). let file = NamedTempFile::new().unwrap(); generate_csv_file( @@ -1667,7 +1669,7 @@ mod tests { wtr.serialize(pubkey2.to_string()).unwrap(); wtr.flush().unwrap(); - let amount = sol_to_lamports(1.5); + let amount = sol_str_to_lamports("1.5").unwrap(); let expected_allocations = vec![ TypedAllocation { @@ -1699,18 +1701,18 @@ mod tests { let mut allocations = vec![ TypedAllocation { recipient: alice, - amount: sol_to_lamports(1.0), + amount: sol_str_to_lamports("1.0").unwrap(), lockup_date: None, }, TypedAllocation { recipient: bob, - amount: sol_to_lamports(1.0), + amount: sol_str_to_lamports("1.0").unwrap(), lockup_date: None, }, ]; let transaction_infos = vec![TransactionInfo { recipient: bob, - amount: sol_to_lamports(1.0), + amount: sol_str_to_lamports("1.0").unwrap(), ..TransactionInfo::default() }]; apply_previous_transactions(&mut allocations, &transaction_infos); @@ -1729,12 +1731,12 @@ mod tests { let lockup1 = "9999-12-31T23:59:59Z".to_string(); let alice_alloc = TypedAllocation { recipient: alice_pubkey, - amount: 
sol_to_lamports(1.0), + amount: sol_str_to_lamports("1.0").unwrap(), lockup_date: None, }; let alice_alloc_lockup0 = TypedAllocation { recipient: alice_pubkey, - amount: sol_to_lamports(1.0), + amount: sol_str_to_lamports("1.0").unwrap(), lockup_date: lockup0.parse().ok(), }; let alice_info = TransactionInfo { @@ -1777,7 +1779,7 @@ mod tests { let lockup_date_str = "2021-01-07T00:00:00Z"; let allocation = TypedAllocation { recipient: Pubkey::default(), - amount: sol_to_lamports(1.002_282_880), + amount: sol_str_to_lamports("1.002282880").unwrap(), lockup_date: lockup_date_str.parse().ok(), }; let stake_account_address = pubkey::new_rand(); @@ -1793,7 +1795,7 @@ mod tests { }; let stake_args = StakeArgs { lockup_authority: Some(lockup_authority_address), - unlocked_sol: sol_to_lamports(1.0), + unlocked_sol: sol_str_to_lamports("1.0").unwrap(), sender_stake_args: Some(sender_stake_args), }; let args = DistributeTokensArgs { @@ -1872,13 +1874,13 @@ mod tests { let fees = client .get_fee_for_message(&one_signer_message(&client)) .unwrap(); - let fees_in_sol = lamports_to_sol(fees); + let fees_in_sol = fees as f64 / LAMPORTS_PER_SOL as f64; let allocation_amount = 1000.0; // Fully funded payer let (allocations, mut args) = initialize_check_payer_balances_inputs( - sol_to_lamports(allocation_amount), + sol_str_to_lamports(&allocation_amount.to_string()).unwrap(), &sender_keypair_file, &sender_keypair_file, None, @@ -1916,7 +1918,7 @@ mod tests { .unwrap(); let transaction = transfer( &client, - sol_to_lamports(allocation_amount), + sol_str_to_lamports(&allocation_amount.to_string()).unwrap(), &alice, &partially_funded_payer.pubkey(), ) @@ -1954,7 +1956,7 @@ mod tests { let fees = client .get_fee_for_message(&one_signer_message(&client)) .unwrap(); - let fees_in_sol = lamports_to_sol(fees); + let fees_in_sol = fees as f64 / LAMPORTS_PER_SOL as f64; let sender_keypair_file = tmp_file_path("keypair_file", &alice.pubkey()); write_keypair_file(&alice, 
&sender_keypair_file).unwrap(); @@ -1966,7 +1968,7 @@ mod tests { write_keypair_file(&funded_payer, &funded_payer_keypair_file).unwrap(); let transaction = transfer( &client, - sol_to_lamports(allocation_amount), + sol_str_to_lamports(&allocation_amount.to_string()).unwrap(), &alice, &funded_payer.pubkey(), ) @@ -1977,7 +1979,7 @@ mod tests { // Fully funded payers let (allocations, mut args) = initialize_check_payer_balances_inputs( - sol_to_lamports(allocation_amount), + sol_str_to_lamports(&allocation_amount.to_string()).unwrap(), &funded_payer_keypair_file, &sender_keypair_file, None, @@ -2063,10 +2065,7 @@ mod tests { } fn simple_test_validator(alice: Pubkey) -> TestValidator { - let test_validator = - TestValidator::with_custom_fees(alice, 10_000, None, SocketAddrSpace::Unspecified); - test_validator.set_startup_verification_complete_for_tests(); - test_validator + TestValidator::with_custom_fees(alice, 10_000, None, SocketAddrSpace::Unspecified) } #[test] @@ -2079,7 +2078,7 @@ mod tests { let fees = client .get_fee_for_message(&one_signer_message(&client)) .unwrap(); - let fees_in_sol = lamports_to_sol(fees); + let fees_in_sol = fees as f64 / LAMPORTS_PER_SOL as f64; let sender_keypair_file = tmp_file_path("keypair_file", &alice.pubkey()); write_keypair_file(&alice, &sender_keypair_file).unwrap(); @@ -2087,15 +2086,15 @@ mod tests { let allocation_amount = 1000.0; let unlocked_sol = 1.0; let stake_args = initialize_stake_account( - sol_to_lamports(allocation_amount), - sol_to_lamports(unlocked_sol), + sol_str_to_lamports(&allocation_amount.to_string()).unwrap(), + sol_str_to_lamports(&unlocked_sol.to_string()).unwrap(), &alice, &client, ); // Fully funded payer & stake account let (allocations, mut args) = initialize_check_payer_balances_inputs( - sol_to_lamports(allocation_amount), + sol_str_to_lamports(&allocation_amount.to_string()).unwrap(), &sender_keypair_file, &sender_keypair_file, Some(stake_args), @@ -2106,7 +2105,7 @@ mod tests { let 
expensive_allocation_amount = 5000.0; let expensive_allocations = vec![TypedAllocation { recipient: pubkey::new_rand(), - amount: sol_to_lamports(expensive_allocation_amount), + amount: sol_str_to_lamports(&expensive_allocation_amount.to_string()).unwrap(), lockup_date: None, }]; let err_result = check_payer_balances( @@ -2157,7 +2156,7 @@ mod tests { .unwrap(); let transaction = transfer( &client, - sol_to_lamports(unlocked_sol), + sol_str_to_lamports(&unlocked_sol.to_string()).unwrap(), &alice, &partially_funded_payer.pubkey(), ) @@ -2195,7 +2194,7 @@ mod tests { let fees = client .get_fee_for_message(&one_signer_message(&client)) .unwrap(); - let fees_in_sol = lamports_to_sol(fees); + let fees_in_sol = fees as f64 / LAMPORTS_PER_SOL as f64; let sender_keypair_file = tmp_file_path("keypair_file", &alice.pubkey()); write_keypair_file(&alice, &sender_keypair_file).unwrap(); @@ -2203,8 +2202,8 @@ mod tests { let allocation_amount = 1000.0; let unlocked_sol = 1.0; let stake_args = initialize_stake_account( - sol_to_lamports(allocation_amount), - sol_to_lamports(unlocked_sol), + sol_str_to_lamports(&allocation_amount.to_string()).unwrap(), + sol_str_to_lamports(&unlocked_sol.to_string()).unwrap(), &alice, &client, ); @@ -2214,7 +2213,7 @@ mod tests { write_keypair_file(&funded_payer, &funded_payer_keypair_file).unwrap(); let transaction = transfer( &client, - sol_to_lamports(unlocked_sol), + sol_str_to_lamports(&unlocked_sol.to_string()).unwrap(), &alice, &funded_payer.pubkey(), ) @@ -2225,7 +2224,7 @@ mod tests { // Fully funded payers let (allocations, mut args) = initialize_check_payer_balances_inputs( - sol_to_lamports(allocation_amount), + sol_str_to_lamports(&allocation_amount.to_string()).unwrap(), &funded_payer_keypair_file, &sender_keypair_file, Some(stake_args), @@ -2278,7 +2277,7 @@ mod tests { let sender = Keypair::new(); let recipient = Pubkey::new_unique(); - let amount = sol_to_lamports(1.0); + let amount = sol_str_to_lamports("1.0").unwrap(); let 
last_valid_block_height = 222; let transaction = transfer(&client, amount, &sender, &recipient).unwrap(); @@ -2313,7 +2312,7 @@ mod tests { }; let allocation = TypedAllocation { recipient, - amount: sol_to_lamports(1.0), + amount: sol_str_to_lamports("1.0").unwrap(), lockup_date: None, }; @@ -2400,7 +2399,7 @@ mod tests { let sender = Keypair::new(); let recipient = Pubkey::new_unique(); - let amount = sol_to_lamports(1.0); + let amount = sol_str_to_lamports("1.0").unwrap(); let last_valid_block_height = 222; let transaction = transfer(&client, amount, &sender, &recipient).unwrap(); @@ -2435,7 +2434,7 @@ mod tests { }; let allocation = TypedAllocation { recipient, - amount: sol_to_lamports(1.0), + amount: sol_str_to_lamports("1.0").unwrap(), lockup_date: None, }; let message = transaction.message.clone(); @@ -2514,7 +2513,7 @@ mod tests { let fee_payer = Keypair::new(); let transaction = transfer( &client, - sol_to_lamports(1.0), + sol_str_to_lamports("1.0").unwrap(), &sender_keypair, &fee_payer.pubkey(), ) @@ -2534,7 +2533,7 @@ mod tests { let recipient = Pubkey::new_unique(); let allocation = TypedAllocation { recipient, - amount: sol_to_lamports(1.0), + amount: sol_str_to_lamports("1.0").unwrap(), lockup_date: None, }; // This is just dummy data; Args will not affect messages @@ -2573,7 +2572,7 @@ mod tests { let sender = Keypair::new(); let recipient = Pubkey::new_unique(); - let amount = sol_to_lamports(1.0); + let amount = sol_str_to_lamports("1.0").unwrap(); let last_valid_block_height = 222; let transaction = transfer(&client, amount, &sender, &recipient).unwrap(); @@ -2666,7 +2665,7 @@ mod tests { let sender = Keypair::new(); let recipient = Pubkey::new_unique(); - let amount = sol_to_lamports(1.0); + let amount = sol_str_to_lamports("1.0").unwrap(); let last_valid_block_height = 222; let transaction = transfer(&client, amount, &sender, &recipient).unwrap(); diff --git a/tokens/src/db.rs b/tokens/src/db.rs index af3ef596161113..e5a1c5c83d19ed 100644 --- 
a/tokens/src/db.rs +++ b/tokens/src/db.rs @@ -145,7 +145,8 @@ pub fn update_finalized_transaction( if opt_transaction_status.is_none() { if finalized_block_height > last_valid_block_height { eprintln!( - "Signature not found {signature} and blockhash expired. Transaction either dropped or the validator purged the transaction status." + "Signature not found {signature} and blockhash expired. Transaction either \ + dropped or the validator purged the transaction status." ); eprintln!(); diff --git a/tokens/src/spl_token.rs b/tokens/src/spl_token.rs index fc2af66cf4fc40..39539a19fae8e5 100644 --- a/tokens/src/spl_token.rs +++ b/tokens/src/spl_token.rs @@ -5,9 +5,9 @@ use { }, console::style, solana_account_decoder::parse_token::{real_number_string, real_number_string_trimmed}, + solana_cli_output::display::build_balance_message, solana_instruction::Instruction, solana_message::Message, - solana_native_token::lamports_to_sol, solana_program_pack::Pack, solana_rpc_client::rpc_client::RpcClient, spl_associated_token_account_interface::{ @@ -94,7 +94,7 @@ pub(crate) fn check_spl_token_balances( if fee_payer_balance < fees + account_creation_amount { return Err(Error::InsufficientFunds( vec![FundingSource::FeePayer].into(), - lamports_to_sol(fees + account_creation_amount).to_string(), + build_balance_message(fees + account_creation_amount, false, false).to_string(), )); } let source_token_account = client diff --git a/tokens/src/token_display.rs b/tokens/src/token_display.rs index 590f6e1561fba5..148a9792bfbd7a 100644 --- a/tokens/src/token_display.rs +++ b/tokens/src/token_display.rs @@ -1,6 +1,6 @@ use { solana_account_decoder::parse_token::real_number_string_trimmed, - solana_native_token::lamports_to_sol, + solana_cli_output::display::build_balance_message, std::{ fmt::{Debug, Display, Formatter, Result}, ops::Add, @@ -25,7 +25,7 @@ impl Token { fn write_with_symbol(&self, f: &mut Formatter) -> Result { match &self.token_type { TokenType::Sol => { - let amount = 
lamports_to_sol(self.amount); + let amount = build_balance_message(self.amount, false, false); write!(f, "{SOL_SYMBOL}{amount}") } TokenType::SplToken => { diff --git a/tps-client/Cargo.toml b/tps-client/Cargo.toml index 201ede15561bc3..f3a98454d4f9a4 100644 --- a/tps-client/Cargo.toml +++ b/tps-client/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "solana-tps-client" -description = "Blockchain, Rebuilt for Scale" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -15,6 +15,7 @@ rustdoc-args = ["--cfg=docsrs"] [features] bank-client = ["dep:solana-client-traits", "dep:solana-runtime"] +dev-context-only-utils = [] [dependencies] log = { workspace = true } @@ -28,6 +29,7 @@ solana-epoch-info = { workspace = true } solana-hash = { workspace = true } solana-keypair = { workspace = true } solana-message = { workspace = true } +solana-net-utils = { workspace = true } solana-pubkey = { workspace = true } solana-quic-client = { workspace = true } solana-rpc-client = { workspace = true } diff --git a/tps-client/src/lib.rs b/tps-client/src/lib.rs index d145f309f5799c..c1d0fd601fd9d1 100644 --- a/tps-client/src/lib.rs +++ b/tps-client/src/lib.rs @@ -63,7 +63,7 @@ pub trait TpsClient { return Ok(new_blockhash); } } - debug!("Got same blockhash ({:?}), will retry...", blockhash); + debug!("Got same blockhash ({blockhash:?}), will retry..."); // Retry ~twice during a slot sleep(Duration::from_millis(DEFAULT_MS_PER_SLOT / 2)); diff --git a/tps-client/src/utils.rs b/tps-client/src/utils.rs index bf1d1326d0d571..c659219fef77c4 100644 --- a/tps-client/src/utils.rs +++ b/tps-client/src/utils.rs @@ -8,7 +8,7 @@ use { solana_streamer::streamer::StakedNodes, std::{ collections::HashMap, - net::IpAddr, + net::{IpAddr, UdpSocket}, sync::{Arc, RwLock}, }, }; @@ -23,7 +23,7 @@ fn find_node_activated_stake( ) -> Result<(u64, u64), ()> { let 
vote_accounts = rpc_client.get_vote_accounts(); if let Err(error) = vote_accounts { - error!("Failed to get vote accounts, error: {}", error); + error!("Failed to get vote accounts, error: {error}"); return Err(()); } @@ -55,6 +55,43 @@ pub fn create_connection_cache( bind_address: IpAddr, client_node_id: Option<&Keypair>, rpc_client: Arc, +) -> ClientConnectionCache { + create_connection_cache_with_client_socket_option( + tpu_connection_pool_size, + use_quic, + bind_address, + client_node_id, + rpc_client, + None, + ) +} + +#[cfg(feature = "dev-context-only-utils")] +pub fn create_connection_cache_for_tests( + tpu_connection_pool_size: usize, + use_quic: bool, + bind_address: IpAddr, + client_node_id: Option<&Keypair>, + rpc_client: Arc, +) -> ClientConnectionCache { + // create the client socket for tests to avoid port collision + create_connection_cache_with_client_socket_option( + tpu_connection_pool_size, + use_quic, + bind_address, + client_node_id, + rpc_client, + Some(solana_net_utils::sockets::bind_to_localhost_unique().unwrap()), + ) +} + +fn create_connection_cache_with_client_socket_option( + tpu_connection_pool_size: usize, + use_quic: bool, + bind_address: IpAddr, + client_node_id: Option<&Keypair>, + rpc_client: Arc, + client_socket: Option, ) -> ClientConnectionCache { if !use_quic { return ClientConnectionCache::with_udp( @@ -63,9 +100,12 @@ pub fn create_connection_cache( ); } if client_node_id.is_none() { - return ClientConnectionCache::new_quic( + return ClientConnectionCache::new_with_client_options( "solana-tps-connection_cache_quic", tpu_connection_pool_size, + client_socket, + None, // no certificate + None, // no stake information ); } @@ -87,7 +127,7 @@ pub fn create_connection_cache( ClientConnectionCache::new_with_client_options( "solana-tps-connection_cache_quic", tpu_connection_pool_size, - None, + client_socket, Some((client_node_id, bind_address)), Some((&staked_nodes, &client_node_id.pubkey())), ) diff --git 
a/tpu-client-next/Cargo.toml b/tpu-client-next/Cargo.toml index d2d2bdc459d635..fa3c61316fcd3e 100644 --- a/tpu-client-next/Cargo.toml +++ b/tpu-client-next/Cargo.toml @@ -12,12 +12,17 @@ edition = { workspace = true } targets = ["x86_64-unknown-linux-gnu"] [features] +agave-unstable-api = ["dep:qualifier_attr"] +default = ["log"] +log = ["dep:log"] metrics = ["dep:solana-metrics"] +tracing = ["dep:tracing"] [dependencies] async-trait = { workspace = true } -log = { workspace = true } +log = { workspace = true, optional = true } lru = { workspace = true } +qualifier_attr = { workspace = true, optional = true } quinn = { workspace = true } rustls = { workspace = true } solana-clock = { workspace = true } @@ -34,6 +39,7 @@ solana-tpu-client = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } tokio-util = { workspace = true } +tracing = { workspace = true, optional = true } [dev-dependencies] crossbeam-channel = { workspace = true } diff --git a/tpu-client-next/src/connection_worker.rs b/tpu-client-next/src/connection_worker.rs index 2d4ca00d4ce953..8af6ae495a8f57 100644 --- a/tpu-client-next/src/connection_worker.rs +++ b/tpu-client-next/src/connection_worker.rs @@ -4,11 +4,13 @@ use { super::SendTransactionStats, crate::{ - quic_networking::send_data_over_stream, send_transaction_stats::record_error, - transaction_batch::TransactionBatch, QuicError, + logging::{debug, error, trace, warn}, + quic_networking::send_data_over_stream, + send_transaction_stats::record_error, + transaction_batch::TransactionBatch, + QuicError, }, - log::*, - quinn::{ConnectError, Connection, Endpoint}, + quinn::{ConnectError, Connection, ConnectionError, Endpoint}, solana_clock::{DEFAULT_MS_PER_SLOT, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS}, solana_measure::measure::Measure, solana_time_utils::timestamp, @@ -68,6 +70,9 @@ impl Drop for ConnectionState { /// [`ConnectionWorker`] holds connection to the validator with address `peer`. 
/// +/// The worker proactively monitors connection health while processing +/// transactions, detecting connection closures immediately rather than waiting +/// for send failures. /// If connection has been closed, [`ConnectionWorker`] tries to reconnect /// `max_reconnect_attempts` times. If connection is in `Active` state, it sends /// transactions received from `transactions_receiver`. Additionally, it @@ -124,7 +129,8 @@ impl ConnectionWorker { /// /// This method manages the connection to the peer and handles state /// transitions. It runs indefinitely until the connection is closed or an - /// unrecoverable error occurs. + /// unrecoverable error occurs. The worker monitors both incoming transactions + /// and connection health simultaneously when in the Active state. pub async fn run(&mut self) { let cancel = self.cancel.clone(); @@ -138,17 +144,37 @@ impl ConnectionWorker { self.create_connection(0).await; } ConnectionState::Active(connection) => { - let Some(transactions) = self.transactions_receiver.recv().await else { - debug!("Transactions sender has been dropped."); - self.connection = ConnectionState::Closing; - continue; - }; - self.send_transactions(connection.clone(), transactions) - .await; + tokio::select! 
{ + // Process incoming transactions + transactions = self.transactions_receiver.recv() => { + match transactions { + Some(batch) => { + self.send_transactions(connection.clone(), batch).await; + } + None => { + debug!( + "Transactions sender has been dropped for peer: {}", + self.peer + ); + self.connection = ConnectionState::Closing; + } + } + } + + // Monitor connection health proactively + close_reason = connection.closed() => { + self.handle_connection_closed(close_reason); + continue; + } + } } ConnectionState::Retry(num_reconnects) => { if *num_reconnects > self.max_reconnect_attempts { - error!("Failed to establish connection: reach max reconnect attempts."); + error!( + "Failed to establish connection to {}: reached max reconnect \ + attempts", + self.peer + ); self.connection = ConnectionState::Closing; continue; } @@ -163,6 +189,70 @@ impl ConnectionWorker { () = main_loop => (), () = cancel.cancelled() => (), } + // Cancel it additionally here so that in WorkerInfo we can check if + // this worker is active. + cancel.cancel(); + } + + /// Handles connection closure events detected by the connection monitor. + /// + /// This method logs the close reason with appropriate severity based on + /// the type of closure, records statistics, and determines whether to + /// attempt reconnection based on the error type. 
+ fn handle_connection_closed(&mut self, close_reason: ConnectionError) { + match &close_reason { + ConnectionError::ConnectionClosed(close) => { + debug!( + "Connection to {} closed by peer: code={} reason={:?}", + self.peer, + close.error_code, + String::from_utf8_lossy(&close.reason) + ); + } + ConnectionError::ApplicationClosed(close) => { + debug!( + "Connection to {} closed by application: code={} reason={:?}", + self.peer, + close.error_code, + String::from_utf8_lossy(&close.reason) + ); + } + ConnectionError::LocallyClosed => { + debug!("Connection to {} closed locally", self.peer); + } + ConnectionError::TimedOut => { + warn!("Connection to {} timed out", self.peer); + } + ConnectionError::Reset => { + warn!("Connection to {} reset", self.peer); + } + ConnectionError::TransportError(e) => { + warn!( + "Connection to {} encountered transport error: {}", + self.peer, e + ); + } + ConnectionError::VersionMismatch => { + error!("Connection to {} failed: version mismatch", self.peer); + } + ConnectionError::CidsExhausted => { + warn!( + "Connection to {} closed: connection IDs exhausted", + self.peer + ); + } + } + + record_error(close_reason.clone().into(), &self.send_txs_stats); + + // Determine next state based on close reason + // Fatal errors transition to Closing, recoverable errors transition to Retry + self.connection = match close_reason { + ConnectionError::VersionMismatch | ConnectionError::LocallyClosed => { + ConnectionState::Closing + } + _ => ConnectionState::Retry(0), + }; } /// Sends a batch of transactions using the provided `connection`. @@ -173,23 +263,38 @@ impl ConnectionWorker { /// outdated and flag `skip_check_transaction_age` is unset, it will be /// dropped without being sent. /// - /// In case of error, it doesn't retry to send the same transactions again. + /// The method checks connection health before sending each transaction to + /// avoid operations on a closed connection. 
In case of error, it doesn't + /// retry to send the same transactions again but transitions to retry state. async fn send_transactions(&mut self, connection: Connection, transactions: TransactionBatch) { let now = timestamp(); if !self.skip_check_transaction_age && now.saturating_sub(transactions.timestamp()) > MAX_PROCESSING_AGE_MS { - debug!("Drop outdated transaction batch."); + debug!("Drop outdated transaction batch for peer: {}", self.peer); return; } + let mut measure_send = Measure::start("send transaction batch"); for data in transactions.into_iter() { + // Check connection health before each send + if connection.close_reason().is_some() { + debug!("Connection closed during transaction batch sending"); + self.connection = ConnectionState::Retry(0); + break; + } + let result = send_data_over_stream(&connection, &data).await; if let Err(error) = result { - trace!("Failed to send transaction over stream with error: {error}."); + trace!( + "Failed to send transaction to {} over stream with error: {error}", + self.peer + ); record_error(error, &self.send_txs_stats); self.connection = ConnectionState::Retry(0); + // Exit early since connection is likely broken + break; } else { self.send_txs_stats .successfully_sent @@ -198,7 +303,8 @@ impl ConnectionWorker { } measure_send.stop(); debug!( - "Time to send transactions batch: {} us", + "Time to send transactions batch to {}: {} us", + self.peer, measure_send.as_us() ); } @@ -245,15 +351,25 @@ impl ConnectionWorker { record_error(connecting_error.clone().into(), &self.send_txs_stats); match connecting_error { ConnectError::EndpointStopping => { - debug!("Endpoint stopping, exit connection worker."); + debug!( + "Endpoint stopping, exit connection worker for peer: {}", + self.peer + ); self.connection = ConnectionState::Closing; } ConnectError::InvalidRemoteAddress(_) => { - warn!("Invalid remote address."); + warn!( + "Invalid remote address for peer: {}, attempt: {}", + self.peer, retries_attempt + ); 
self.connection = ConnectionState::Closing; } e => { - error!("Unexpected error has happen while trying to create connection {e}"); + error!( + "Unexpected error has happened while trying to create connection to \ + {}: {e}", + self.peer + ); self.connection = ConnectionState::Closing; } } @@ -263,7 +379,10 @@ impl ConnectionWorker { /// Attempts to reconnect to the peer after a connection failure. async fn reconnect(&mut self, num_reconnects: usize) { - debug!("Trying to reconnect. Reopen connection, 0rtt is not implemented yet."); + debug!( + "Trying to reconnect to {}. Reopen connection, 0rtt is not implemented yet.", + self.peer + ); // We can reconnect using 0rtt, but not a priority for now. Check if we // need to call config.enable_0rtt() on the client side and where // session tickets are stored. diff --git a/tpu-client-next/src/connection_workers_scheduler.rs b/tpu-client-next/src/connection_workers_scheduler.rs index 25fade9fb3ed2b..7097e40bcb2b12 100644 --- a/tpu-client-next/src/connection_workers_scheduler.rs +++ b/tpu-client-next/src/connection_workers_scheduler.rs @@ -1,19 +1,21 @@ //! This module defines [`ConnectionWorkersScheduler`] which sends transactions //! to the upcoming leaders. 
+#[cfg(feature = "agave-unstable-api")] +use qualifier_attr::qualifiers; use { super::leader_updater::LeaderUpdater, crate::{ connection_worker::DEFAULT_MAX_CONNECTION_HANDSHAKE_TIMEOUT, + logging::{debug, warn}, quic_networking::{ create_client_config, create_client_endpoint, QuicClientCertificate, QuicError, }, transaction_batch::TransactionBatch, - workers_cache::{shutdown_worker, spawn_worker, WorkersCache, WorkersCacheError}, + workers_cache::{shutdown_worker, WorkersCache, WorkersCacheError}, SendTransactionStats, }, async_trait::async_trait, - log::*, quinn::{ClientConfig, Endpoint}, solana_keypair::Keypair, std::{ @@ -62,6 +64,7 @@ pub enum ConnectionWorkersSchedulerError { /// The idea of having a separate `connect` parameter is to create a set of /// nodes to connect to in advance in order to hide the latency of opening new /// connection. Hence, `connect` must be greater or equal to `send` +#[derive(Debug, Clone)] pub struct Fanout { /// The number of leaders to target for sending transactions. pub send: usize, @@ -269,19 +272,16 @@ impl ConnectionWorkersScheduler { // add future leaders to the cache to hide the latency of opening // the connection. for peer in connect_leaders { - if !workers.contains(&peer) { - let worker = spawn_worker( - &endpoint, - &peer, - worker_channel_size, - skip_check_transaction_age, - max_reconnect_attempts, - DEFAULT_MAX_CONNECTION_HANDSHAKE_TIMEOUT, - stats.clone(), - ); - if let Some(pop_worker) = workers.push(peer, worker) { - shutdown_worker(pop_worker) - } + if let Some(evicted_worker) = workers.ensure_worker( + peer, + &endpoint, + worker_channel_size, + skip_check_transaction_age, + max_reconnect_attempts, + DEFAULT_MAX_CONNECTION_HANDSHAKE_TIMEOUT, + stats.clone(), + ) { + shutdown_worker(evicted_worker); } } @@ -305,6 +305,7 @@ impl ConnectionWorkersScheduler { } /// Sets up the QUIC endpoint for the scheduler to handle connections. 
+#[cfg_attr(feature = "agave-unstable-api", qualifiers(pub))] fn setup_endpoint( bind: BindTarget, stake_identity: Option, @@ -335,15 +336,13 @@ impl WorkersBroadcaster for NonblockingBroadcaster { transaction_batch: TransactionBatch, ) -> Result<(), ConnectionWorkersSchedulerError> { for new_leader in leaders { - if !workers.contains(new_leader) { - warn!("No existing worker for {new_leader:?}, skip sending to this leader."); - continue; - } - let send_res = workers.try_send_transactions_to_address(new_leader, transaction_batch.clone()); match send_res { Ok(()) => (), + Err(WorkersCacheError::WorkerNotFound) => { + warn!("No existing worker for {new_leader:?}, skip sending to this leader."); + } Err(WorkersCacheError::ShutdownError) => { debug!("Connection to {new_leader} was closed, worker cache shutdown"); } @@ -367,7 +366,8 @@ impl WorkersBroadcaster for NonblockingBroadcaster { /// /// This function selects up to `send_fanout` addresses from the `leaders` list, ensuring that /// only unique addresses are included while maintaining their original order. -fn extract_send_leaders(leaders: &[SocketAddr], send_fanout: usize) -> Vec { +#[cfg_attr(feature = "agave-unstable-api", qualifiers(pub))] +pub fn extract_send_leaders(leaders: &[SocketAddr], send_fanout: usize) -> Vec { let send_count = send_fanout.min(leaders.len()); remove_duplicates(&leaders[..send_count]) } diff --git a/tpu-client-next/src/leader_updater.rs b/tpu-client-next/src/leader_updater.rs index 690c21af461492..3dd908c1aa858e 100644 --- a/tpu-client-next/src/leader_updater.rs +++ b/tpu-client-next/src/leader_updater.rs @@ -9,8 +9,8 @@ //! Yet, it also allows to implement custom leader estimation. 
use { + crate::logging::error, async_trait::async_trait, - log::*, solana_clock::NUM_CONSECUTIVE_LEADER_SLOTS, solana_connection_cache::connection_cache::Protocol, solana_rpc_client::nonblocking::rpc_client::RpcClient, diff --git a/tpu-client-next/src/lib.rs b/tpu-client-next/src/lib.rs index c416f35c5229f4..4bb09c91c67932 100644 --- a/tpu-client-next/src/lib.rs +++ b/tpu-client-next/src/lib.rs @@ -1,3 +1,13 @@ +//! # Feature flags +//! +//! Tpu-client-next supports three features: +//! +//! - **`metrics`**: Enables implementation of the method `report_to_influxdb` for +//! [`SendTransactionStats`] structure. +//! - **`log`**: Enables logging using `log` crate. It is enabled by default. +//! - **`tracing`**: Enables logging using `tracing` crate instead of `log`. This feature is +//! mutually exclusive with `log`. + pub(crate) mod connection_worker; pub mod connection_workers_scheduler; pub mod send_transaction_stats; @@ -13,3 +23,6 @@ pub mod transaction_batch; #[cfg(feature = "metrics")] pub mod metrics; + +// Logging abstraction module +pub(crate) mod logging; diff --git a/tpu-client-next/src/logging.rs b/tpu-client-next/src/logging.rs new file mode 100644 index 00000000000000..791c002343b776 --- /dev/null +++ b/tpu-client-next/src/logging.rs @@ -0,0 +1,31 @@ +//! Logging abstraction module that supports both `log` and `tracing` libraries. +//! +//! This module provides a unified logging interface that can be configured +//! to use either the `log` crate (default) or the `tracing` crate. +//! The features are mutually exclusive - only one can be enabled at a time. 
+ +#[cfg(feature = "log")] +pub use log::{debug, error, trace, warn}; +#[cfg(feature = "tracing")] +pub use tracing::{debug, error, trace, warn}; + +#[cfg(not(any(feature = "log", feature = "tracing")))] +compile_error!("Either 'log' or 'tracing' feature must be enabled"); + +#[cfg(all(feature = "log", feature = "tracing"))] +compile_error!("'log' and 'tracing' features are mutually exclusive"); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_logging_macros_available() { + // This test verifies that the logging macros are available + // and can be called without errors + debug!("Test debug message"); + error!("Test error message"); + trace!("Test trace message"); + warn!("Test warn message"); + } +} diff --git a/tpu-client-next/src/workers_cache.rs b/tpu-client-next/src/workers_cache.rs index dd4c9f03b34396..838ceecd07623c 100644 --- a/tpu-client-next/src/workers_cache.rs +++ b/tpu-client-next/src/workers_cache.rs @@ -2,12 +2,15 @@ //! structures provide mechanisms for caching workers, sending transaction //! batches, and gathering send transaction statistics. +#[cfg(feature = "agave-unstable-api")] +use qualifier_attr::qualifiers; use { crate::{ - connection_worker::ConnectionWorker, transaction_batch::TransactionBatch, + connection_worker::ConnectionWorker, + logging::{debug, trace}, + transaction_batch::TransactionBatch, SendTransactionStats, }, - log::*, lru::LruCache, quinn::Endpoint, std::{net::SocketAddr, sync::Arc, time::Duration}, @@ -69,9 +72,16 @@ impl WorkerInfo { .map_err(|_| WorkersCacheError::TaskJoinFailure)?; Ok(()) } + + /// Returns `true` if the worker is still active and able to send + /// transactions. + fn is_active(&self) -> bool { + !(self.cancel.is_cancelled() || self.sender.is_closed()) + } } /// Spawns a worker to handle communication with a given peer. 
+#[cfg_attr(feature = "agave-unstable-api", qualifiers(pub))] pub(crate) fn spawn_worker( endpoint: &Endpoint, peer: &SocketAddr, @@ -125,9 +135,13 @@ pub enum WorkersCacheError { #[error("The WorkersCache is being shutdown.")] ShutdownError, + + #[error("No worker exists for the specified peer.")] + WorkerNotFound, } impl WorkersCache { + #[cfg_attr(feature = "agave-unstable-api", qualifiers(pub))] pub(crate) fn new(capacity: usize, cancel: CancellationToken) -> Self { Self { workers: LruCache::new(capacity), @@ -141,6 +155,7 @@ impl WorkersCache { self.workers.contains(peer) } + #[cfg_attr(feature = "agave-unstable-api", qualifiers(pub))] pub(crate) fn push( &mut self, leader: SocketAddr, @@ -165,15 +180,56 @@ impl WorkersCache { None } + /// Ensures a worker exists for the given peer, creating one if necessary. + /// + /// Returns any evicted worker that needs shutdown. + pub fn ensure_worker( + &mut self, + peer: SocketAddr, + endpoint: &Endpoint, + worker_channel_size: usize, + skip_check_transaction_age: bool, + max_reconnect_attempts: usize, + handshake_timeout: Duration, + stats: Arc, + ) -> Option { + if let Some(worker) = self.workers.peek(&peer) { + // if worker is active, we will reuse it. Otherwise, we will spawn + // the new one and the existing will be popped out. + if worker.is_active() { + return None; + } + } + trace!("No active worker for peer {peer}, respawning."); + + let worker = spawn_worker( + endpoint, + &peer, + worker_channel_size, + skip_check_transaction_age, + max_reconnect_attempts, + handshake_timeout, + stats, + ); + + self.push(peer, worker) + } + /// Attempts to send immediately a batch of transactions to the worker for a /// given peer. /// /// This method returns immediately if the channel of worker corresponding /// to this peer is full returning error [`WorkersCacheError::FullChannel`]. - /// If it happens that the peer's worker is stopped, it returns - /// [`WorkersCacheError::ShutdownError`]. 
In case if the worker is not - /// stopped but it's channel is unexpectedly dropped, it returns - /// [`WorkersCacheError::ReceiverDropped`]. + /// If no worker exists for the peer, it returns + /// [`WorkersCacheError::WorkerNotFound`]. If it happens that the peer's + /// worker is stopped, it returns [`WorkersCacheError::ShutdownError`]. + /// In case if the worker is not stopped but it's channel is unexpectedly + /// dropped, it returns [`WorkersCacheError::ReceiverDropped`]. + /// + /// Note: The worker existence check is necessary because workers can fail + /// asynchronously between creation and sending. Worker tasks may exit + /// due to connection failures, network issues, or cache evictions, + /// making a previously created worker unavailable. pub fn try_send_transactions_to_address( &mut self, peer: &SocketAddr, @@ -186,10 +242,8 @@ impl WorkersCache { return Err(WorkersCacheError::ShutdownError); } - let current_worker = workers.get(peer).expect( - "Failed to fetch worker for peer {peer}. Peer existence must be checked before this \ - call using `contains` method.", - ); + let current_worker = workers.get(peer).ok_or(WorkersCacheError::WorkerNotFound)?; + let send_res = current_worker.try_send_transactions(txs_batch); if let Err(WorkersCacheError::ReceiverDropped) = send_res { @@ -211,7 +265,8 @@ impl WorkersCache { /// Sends a batch of transactions to the worker for a given peer. /// /// If the worker for the peer is disconnected or fails, it - /// is removed from the cache. + /// is removed from the cache. If no worker exists for the peer, + /// it returns [`WorkersCacheError::WorkerNotFound`]. #[allow( dead_code, reason = "This method will be used in the upcoming changes to implement optional \ @@ -227,10 +282,8 @@ impl WorkersCache { } = self; let body = async move { - let current_worker = workers.get(peer).expect( - "Failed to fetch worker for peer {peer}. 
Peer existence must be checked before \ - this call using `contains` method.", - ); + let current_worker = workers.get(peer).ok_or(WorkersCacheError::WorkerNotFound)?; + let send_res = current_worker.send_transactions(txs_batch).await; if let Err(WorkersCacheError::ReceiverDropped) = send_res { // Remove the worker from the cache, if the peer has disconnected. @@ -267,6 +320,7 @@ impl WorkersCache { /// /// The method awaits the completion of all shutdown tasks, ensuring that /// each worker is properly terminated. + #[cfg_attr(feature = "agave-unstable-api", qualifiers(pub))] pub(crate) async fn shutdown(&mut self) { // Interrupt any outstanding `send_transactions()` calls. self.cancel.cancel(); @@ -328,10 +382,10 @@ mod tests { SendTransactionStats, }, quinn::Endpoint, - solana_net_utils::{bind_in_range, sockets::localhost_port_range_for_tests}, + solana_net_utils::sockets::{bind_to_localhost_unique, unique_port_range_for_tests}, solana_tls_utils::QuicClientCertificate, std::{ - net::{IpAddr, Ipv4Addr, SocketAddr}, + net::{Ipv4Addr, SocketAddr}, sync::Arc, time::Duration, }, @@ -343,10 +397,7 @@ mod tests { const TEST_MAX_TIME: Duration = Duration::from_secs(5); fn create_test_endpoint() -> Endpoint { - let port_range = localhost_port_range_for_tests(); - let socket = bind_in_range(IpAddr::V4(Ipv4Addr::LOCALHOST), port_range) - .unwrap() - .1; + let socket = bind_to_localhost_unique().unwrap(); let client_config = create_client_config(&QuicClientCertificate::new(None)); create_client_endpoint(BindTarget::Socket(socket), client_config).unwrap() } @@ -355,8 +406,8 @@ mod tests { async fn test_worker_stopped_after_failed_connect() { let endpoint = create_test_endpoint(); - let port_range = localhost_port_range_for_tests(); - let peer: SocketAddr = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), port_range.0); + let port_range = unique_port_range_for_tests(2); + let peer: SocketAddr = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), port_range.start); let worker_channel_size 
= 1; let skip_check_transaction_age = true; @@ -389,8 +440,8 @@ mod tests { async fn test_worker_shutdown() { let endpoint = create_test_endpoint(); - let port_range = localhost_port_range_for_tests(); - let peer: SocketAddr = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), port_range.0); + let port_range = unique_port_range_for_tests(2); + let peer: SocketAddr = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), port_range.start); let worker_channel_size = 1; let skip_check_transaction_age = true; @@ -422,8 +473,8 @@ mod tests { let cancel = CancellationToken::new(); let mut cache = WorkersCache::new(10, cancel.clone()); - let port_range = localhost_port_range_for_tests(); - let peer: SocketAddr = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), port_range.0); + let port_range = unique_port_range_for_tests(2); + let peer: SocketAddr = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), port_range.start); let worker_channel_size = 1; let skip_check_transaction_age = true; let max_reconnect_attempts = 0; @@ -449,6 +500,8 @@ mod tests { sleep(Duration::from_millis(500)).await; } + assert!(!worker_info.is_active(), "Worker should be inactive"); + // try to send to this worker — should fail and remove the worker let result = cache .try_send_transactions_to_address(&peer, TransactionBatch::new(vec![vec![0u8; 1]])); diff --git a/tpu-client-next/tests/connection_workers_scheduler_test.rs b/tpu-client-next/tests/connection_workers_scheduler_test.rs index 2cc808ab88ef85..10465f9ecd5b19 100644 --- a/tpu-client-next/tests/connection_workers_scheduler_test.rs +++ b/tpu-client-next/tests/connection_workers_scheduler_test.rs @@ -4,6 +4,7 @@ use { solana_cli_config::ConfigInput, solana_commitment_config::CommitmentConfig, solana_keypair::Keypair, + solana_net_utils::sockets::unique_port_range_for_tests, solana_pubkey::Pubkey, solana_rpc_client::nonblocking::rpc_client::RpcClient, solana_signer::Signer, @@ -28,7 +29,7 @@ use { collections::HashMap, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, 
num::Saturating, - sync::{atomic::Ordering, Arc}, + sync::Arc, time::Duration, }, tokio::{ @@ -43,7 +44,10 @@ use { }; fn test_config(stake_identity: Option) -> ConnectionWorkersSchedulerConfig { - let address = SocketAddr::new(Ipv4Addr::new(127, 0, 0, 1).into(), 0); + let address = SocketAddr::new( + IpAddr::V4(Ipv4Addr::LOCALHOST), + unique_port_range_for_tests(1).start, + ); ConnectionWorkersSchedulerConfig { bind: BindTarget::Address(address), stake_identity: stake_identity.map(|identity| StakeIdentity::new(&identity)), @@ -192,10 +196,10 @@ fn spawn_tx_sender( async fn test_basic_transactions_sending() { let SpawnTestServerResult { join_handle: server_handle, - exit, receiver, server_address, stats: _stats, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); // Setup sending txs @@ -251,7 +255,7 @@ async fn test_basic_transactions_sending() { assert_eq!(stats.successfully_sent, expected_num_txs as u64,); // Stop server - exit.store(true, Ordering::Relaxed); + cancel.cancel(); server_handle.await.unwrap(); } @@ -282,10 +286,10 @@ async fn count_received_packets_for( async fn test_connection_denied_until_allowed() { let SpawnTestServerResult { join_handle: server_handle, - exit, receiver, server_address, stats: _stats, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); // To prevent server from accepting a new connection, we use the following observation. @@ -319,33 +323,34 @@ async fn test_connection_denied_until_allowed() { // Wait for the exchange to finish. tx_sender_shutdown.await; let stats = join_scheduler(scheduler_handle).await; - // in case of pruning, server closes the connection with code 1 and error - // message b"dropped". This might lead to connection error - // (ApplicationClosed::ApplicationClose) or to stream error - // (ConnectionLost::ApplicationClosed::ApplicationClose). 
- assert_eq!( - stats.write_error_connection_lost + stats.connection_error_application_closed, - 1 + // With proactive detection, we detect rejection immediately and retry within test duration. + // Expect at least 2 errors: initial rejection + retry attempts. + assert!( + stats.write_error_connection_lost + stats.connection_error_application_closed >= 2, + "Expected at least 2 connection errors, got write_error_connection_lost: {}, \ + connection_error_application_closed: {}", + stats.write_error_connection_lost, + stats.connection_error_application_closed ); drop(throttling_connection); // Exit server - exit.store(true, Ordering::Relaxed); + cancel.cancel(); server_handle.await.unwrap(); } // Check that if the client connection has been pruned, client manages to -// reestablish it. Pruning will lead to 1 packet loss, because when we send the -// next packet we will reestablish connection. +// reestablish it. With more packets, we can observe the impact of pruning +// even with proactive detection. #[tokio::test] async fn test_connection_pruned_and_reopened() { let SpawnTestServerResult { join_handle: server_handle, - exit, receiver, server_address, stats: _stats, + cancel, } = setup_quic_server( None, QuicServerParams { @@ -357,7 +362,7 @@ async fn test_connection_pruned_and_reopened() { // Setup sending txs let tx_size = 1; - let expected_num_txs: usize = 16; + let expected_num_txs: usize = 48; let SpawnTxGenerator { tx_receiver, tx_sender_shutdown, @@ -377,17 +382,14 @@ async fn test_connection_pruned_and_reopened() { // Wait for the exchange to finish. tx_sender_shutdown.await; let stats = join_scheduler(scheduler_handle).await; - // in case of pruning, server closes the connection with code 1 and error - // message b"dropped". This might lead to connection error - // (ApplicationClosed::ApplicationClose) or to stream error - // (ConnectionLost::ApplicationClosed::ApplicationClose). 
- assert_eq!( - stats.connection_error_application_closed + stats.write_error_connection_lost, - 1, + // Proactive detection catches pruning immediately, expect multiple retries. + assert!( + stats.connection_error_application_closed + stats.write_error_connection_lost >= 1, + "Expected at least 1 connection error from pruning and retries. Stats: {stats:?}" ); // Exit server - exit.store(true, Ordering::Relaxed); + cancel.cancel(); server_handle.await.unwrap(); } @@ -401,10 +403,10 @@ async fn test_staked_connection() { let SpawnTestServerResult { join_handle: server_handle, - exit, receiver, server_address, stats: _stats, + cancel, } = setup_quic_server( Some(staked_nodes), QuicServerParams { @@ -445,7 +447,7 @@ async fn test_staked_connection() { ); // Exit server - exit.store(true, Ordering::Relaxed); + cancel.cancel(); server_handle.await.unwrap(); } @@ -456,10 +458,10 @@ async fn test_staked_connection() { async fn test_connection_throttling() { let SpawnTestServerResult { join_handle: server_handle, - exit, receiver, server_address, stats: _stats, + cancel, } = setup_quic_server(None, QuicServerParams::default_for_tests()); // Setup sending txs @@ -492,7 +494,7 @@ async fn test_connection_throttling() { ); // Exit server - exit.store(true, Ordering::Relaxed); + cancel.cancel(); server_handle.await.unwrap(); } @@ -525,11 +527,15 @@ async fn test_no_host() { // Wait for the generator to finish. tx_sender_shutdown.await; - // While attempting to establish a connection with a nonexistent host, we fill the worker's - // channel. + // For each transaction, we will check if worker exists and active. In this + // case, worker will never be active because when failed creating + // connection, we stop it. So scheduler will `max_send_attempts` try to + // create worker and fail each time. 
let stats = join_scheduler(scheduler_handle).await; - // `5` because `config.max_reconnect_attempts` is 4 - assert_eq!(stats.connect_error_invalid_remote_address, 5); + assert_eq!( + stats.connect_error_invalid_remote_address, + max_send_attempts as u64 + ); } // Check that when the client is rate-limited by server, we update counters @@ -543,10 +549,10 @@ async fn test_no_host() { async fn test_rate_limiting() { let SpawnTestServerResult { join_handle: server_handle, - exit, receiver, server_address, stats: _stats, + cancel, } = setup_quic_server( None, QuicServerParams { @@ -594,7 +600,7 @@ async fn test_rate_limiting() { ); // Stop the server. - exit.store(true, Ordering::Relaxed); + cancel.cancel(); server_handle.await.unwrap(); } @@ -607,10 +613,10 @@ async fn test_rate_limiting() { async fn test_rate_limiting_establish_connection() { let SpawnTestServerResult { join_handle: server_handle, - exit, receiver, server_address, stats: _stats, + cancel, } = setup_quic_server( None, QuicServerParams { @@ -669,7 +675,7 @@ async fn test_rate_limiting_establish_connection() { assert_eq!(stats, SendTransactionStatsNonAtomic::default()); // Stop the server. - exit.store(true, Ordering::Relaxed); + cancel.cancel(); server_handle.await.unwrap(); } @@ -688,10 +694,10 @@ async fn test_update_identity() { let SpawnTestServerResult { join_handle: server_handle, - exit, receiver, server_address, stats: _stats, + cancel, } = setup_quic_server( Some(staked_nodes), QuicServerParams { @@ -740,6 +746,76 @@ async fn test_update_identity() { assert!(stats.successfully_sent > 0); // Exit server - exit.store(true, Ordering::Relaxed); + cancel.cancel(); + server_handle.await.unwrap(); +} + +// Test that connection close events are detected immediately via +// connection.closed() monitoring, not only when send operations fail. 
+#[tokio::test] +async fn test_proactive_connection_close_detection() { + let SpawnTestServerResult { + join_handle: server_handle, + receiver, + server_address, + stats: _stats, + cancel, + } = setup_quic_server( + None, + QuicServerParams { + max_connections_per_peer: 1, + max_unstaked_connections: 1, + ..QuicServerParams::default_for_tests() + }, + ); + + // Setup controlled transaction sending + let tx_size = 1; + let (tx_sender, tx_receiver) = channel(10); + + let (scheduler_handle, _update_identity_sender, scheduler_cancel) = + setup_connection_worker_scheduler(server_address, tx_receiver, None).await; + + // Send first transaction to establish connection + tx_sender + .send(TransactionBatch::new(vec![vec![1u8; tx_size]])) + .await + .expect("Send first batch"); + + // Verify first packet received + let mut first_packet_received = false; + let start = Instant::now(); + while !first_packet_received && start.elapsed() < Duration::from_secs(1) { + if let Ok(packets) = receiver.try_recv() { + if !packets.is_empty() { + first_packet_received = true; + } + } else { + sleep(Duration::from_millis(10)).await; + } + } + assert!(first_packet_received, "First packet should be received"); + + // Exit server + cancel.cancel(); server_handle.await.unwrap(); + + tx_sender + .send(TransactionBatch::new(vec![vec![2u8; tx_size]])) + .await + .expect("Send second batch"); + tx_sender + .send(TransactionBatch::new(vec![vec![3u8; tx_size]])) + .await + .expect("Send third batch"); + + // Clean up + scheduler_cancel.cancel(); + let stats = join_scheduler(scheduler_handle).await; + + // Verify proactive close detection + assert!( + stats.connection_error_application_closed > 0 || stats.write_error_connection_lost > 0, + "Should detect connection close proactively. 
Stats: {stats:?}" + ); } diff --git a/tpu-client/src/nonblocking/tpu_client.rs b/tpu-client/src/nonblocking/tpu_client.rs index 401c1360943ad0..2e8b7481926fdb 100644 --- a/tpu-client/src/nonblocking/tpu_client.rs +++ b/tpu-client/src/nonblocking/tpu_client.rs @@ -244,8 +244,7 @@ impl LeaderTpuCache { } Err(err) => { warn!( - "Failed to fetch slot leaders (first_slot: \ - {}): {err}", + "Failed to fetch slot leaders (first_slot: {}): {err}", cache_update_info.first_slot ); has_error = true; diff --git a/tpu-client/src/tpu_client.rs b/tpu-client/src/tpu_client.rs index 12fc59883ddb81..89165b3dc2192d 100644 --- a/tpu-client/src/tpu_client.rs +++ b/tpu-client/src/tpu_client.rs @@ -10,14 +10,12 @@ use { ConnectionCache, ConnectionManager, ConnectionPool, NewConnectionConfig, }, }, - solana_net_utils::bind_to_unspecified, solana_rpc_client::rpc_client::RpcClient, solana_signature::Signature, solana_transaction::{versioned::VersionedTransaction, Transaction}, solana_transaction_error::{TransportError, TransportResult}, std::{ collections::VecDeque, - net::UdpSocket, sync::{Arc, RwLock}, }, }; @@ -74,8 +72,6 @@ pub struct TpuClient< M, // ConnectionManager C, // NewConnectionConfig > { - _deprecated: UdpSocket, // TpuClient now uses the connection_cache to choose a send_socket - //todo: get rid of this field rpc_client: Arc, tpu_client: Arc>, } @@ -114,7 +110,7 @@ where transaction: &Transaction, ) -> TransportResult<()> { let wire_transaction = - bincode::serialize(&transaction).expect("should serialize transaction"); + Arc::new(bincode::serialize(&transaction).expect("should serialize transaction")); let leaders = self .tpu_client @@ -191,7 +187,6 @@ where tokio::task::block_in_place(|| rpc_client.runtime().block_on(create_tpu_client))?; Ok(Self { - _deprecated: bind_to_unspecified().unwrap(), rpc_client, tpu_client: Arc::new(tpu_client), }) @@ -214,7 +209,6 @@ where tokio::task::block_in_place(|| rpc_client.runtime().block_on(create_tpu_client))?; Ok(Self { - 
_deprecated: bind_to_unspecified().unwrap(), rpc_client, tpu_client: Arc::new(tpu_client), }) diff --git a/transaction-context/Cargo.toml b/transaction-context/Cargo.toml index 6b3abb6ba72cb8..d94b62efa67da5 100644 --- a/transaction-context/Cargo.toml +++ b/transaction-context/Cargo.toml @@ -16,10 +16,11 @@ rustdoc-args = ["--cfg=docsrs"] [features] bincode = ["dep:bincode", "serde", "solana-account/bincode"] -dev-context-only-utils = ["bincode", "solana-account/dev-context-only-utils"] +dev-context-only-utils = ["bincode", "solana-account/dev-context-only-utils", "dep:qualifier_attr"] serde = ["dep:serde", "dep:serde_derive"] [dependencies] +qualifier_attr = { workspace = true, optional = true } serde = { workspace = true, optional = true } serde_derive = { workspace = true, optional = true } solana-account = { workspace = true } @@ -36,6 +37,7 @@ solana-signature = { workspace = true, optional = true } [dev-dependencies] solana-account-info = { workspace = true } +solana-program-entrypoint = { workspace = true } solana-system-interface = { workspace = true } solana-transaction-context = { path = ".", features = [ "dev-context-only-utils", diff --git a/transaction-context/src/lib.rs b/transaction-context/src/lib.rs index c6a74463bf73b1..2cfc3c98bec0a3 100644 --- a/transaction-context/src/lib.rs +++ b/transaction-context/src/lib.rs @@ -2,68 +2,72 @@ #![deny(clippy::indexing_slicing)] #![cfg_attr(docsrs, feature(doc_auto_cfg))] -#[cfg(not(target_os = "solana"))] -use {solana_account::WritableAccount, solana_rent::Rent}; use { + crate::transaction_accounts::{AccountRefMut, TransactionAccount, TransactionAccounts}, solana_account::{AccountSharedData, ReadableAccount}, solana_instruction::error::InstructionError, solana_instructions_sysvar as instructions, solana_pubkey::Pubkey, solana_sbpf::memory_region::{AccessType, AccessViolationHandler, MemoryRegion}, std::{ - cell::{Ref, RefCell, RefMut}, + cell::{Cell, UnsafeCell}, collections::HashSet, - pin::Pin, rc::Rc, }, 
}; - -// Inlined to avoid solana_system_interface dep #[cfg(not(target_os = "solana"))] -const MAX_PERMITTED_DATA_LENGTH: u64 = 10 * 1024 * 1024; +use {solana_account::WritableAccount, solana_rent::Rent}; + +pub mod transaction_accounts; +pub mod vm_slice; + +pub const MAX_ACCOUNTS_PER_TRANSACTION: usize = 256; +// This is one less than MAX_ACCOUNTS_PER_TRANSACTION because +// one index is used as NON_DUP_MARKER in ABI v0 and v1. +pub const MAX_ACCOUNTS_PER_INSTRUCTION: usize = 255; +pub const MAX_INSTRUCTION_DATA_LEN: usize = 10 * 1024; +pub const MAX_ACCOUNT_DATA_LEN: u64 = 10 * 1024 * 1024; +// Note: With stricter_abi_and_runtime_constraints programs can grow accounts +// faster than they intend to, because the AccessViolationHandler might grow +// an account up to MAX_ACCOUNT_DATA_GROWTH_PER_INSTRUCTION at once. +pub const MAX_ACCOUNT_DATA_GROWTH_PER_TRANSACTION: i64 = MAX_ACCOUNT_DATA_LEN as i64 * 2; +pub const MAX_ACCOUNT_DATA_GROWTH_PER_INSTRUCTION: usize = 10 * 1_024; + #[cfg(test)] static_assertions::const_assert_eq!( - MAX_PERMITTED_DATA_LENGTH, - solana_system_interface::MAX_PERMITTED_DATA_LENGTH + MAX_ACCOUNTS_PER_INSTRUCTION, + solana_program_entrypoint::NON_DUP_MARKER as usize, ); - -// Inlined to avoid solana_system_interface dep -#[cfg(not(target_os = "solana"))] -const MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION: i64 = - MAX_PERMITTED_DATA_LENGTH as i64 * 2; -// Note: With stricter_abi_and_runtime_constraints programs can grow accounts faster than they intend to, -// because the AccessViolationHandler might grow an account up to -// MAX_PERMITTED_DATA_LENGTH at once. 
#[cfg(test)] static_assertions::const_assert_eq!( - MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION, - solana_system_interface::MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION + MAX_ACCOUNT_DATA_LEN, + solana_system_interface::MAX_PERMITTED_DATA_LENGTH, ); - -// Inlined to avoid solana_account_info dep -#[cfg(not(target_os = "solana"))] -const MAX_PERMITTED_DATA_INCREASE: usize = 1_024 * 10; #[cfg(test)] static_assertions::const_assert_eq!( - MAX_PERMITTED_DATA_INCREASE, - solana_account_info::MAX_PERMITTED_DATA_INCREASE + MAX_ACCOUNT_DATA_GROWTH_PER_TRANSACTION, + solana_system_interface::MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION, +); +#[cfg(test)] +static_assertions::const_assert_eq!( + MAX_ACCOUNT_DATA_GROWTH_PER_INSTRUCTION, + solana_account_info::MAX_PERMITTED_DATA_INCREASE, ); -/// Index of an account inside of the TransactionContext or an InstructionContext. +/// Index of an account inside of the transaction or an instruction. pub type IndexOfAccount = u16; /// Contains account meta data which varies between instruction. /// /// It also contains indices to other structures for faster lookup. +/// +/// This data structure is supposed to be shared with programs in ABIv2, so do not modify it +/// without consulting SIMD-0177. #[repr(C)] -#[derive(Clone, Debug, Eq, PartialEq)] +#[derive(Clone, Copy, Debug)] pub struct InstructionAccount { /// Points to the account and its key in the `TransactionContext` pub index_in_transaction: IndexOfAccount, - /// Points to the first occurrence in the current `InstructionContext` - /// - /// This excludes the program accounts. 
- pub index_in_callee: IndexOfAccount, /// Is this account supposed to sign is_signer: u8, /// Is this account allowed to become writable @@ -73,13 +77,11 @@ pub struct InstructionAccount { impl InstructionAccount { pub fn new( index_in_transaction: IndexOfAccount, - index_in_callee: IndexOfAccount, is_signer: bool, is_writable: bool, ) -> InstructionAccount { InstructionAccount { index_in_transaction, - index_in_callee, is_signer: is_signer as u8, is_writable: is_writable as u8, } @@ -102,107 +104,19 @@ impl InstructionAccount { } } -/// An account key and the matching account -pub type TransactionAccount = (Pubkey, AccountSharedData); - -#[derive(Clone, Debug, PartialEq)] -pub struct TransactionAccounts { - accounts: Vec>, - touched_flags: RefCell>, - resize_delta: RefCell, -} - -impl TransactionAccounts { - #[cfg(not(target_os = "solana"))] - fn new(accounts: Vec>) -> TransactionAccounts { - let touched_flags = vec![false; accounts.len()].into_boxed_slice(); - TransactionAccounts { - accounts, - touched_flags: RefCell::new(touched_flags), - resize_delta: RefCell::new(0), - } - } - - fn len(&self) -> usize { - self.accounts.len() - } - - fn get(&self, index: IndexOfAccount) -> Option<&RefCell> { - self.accounts.get(index as usize) - } - - #[cfg(not(target_os = "solana"))] - pub fn touch(&self, index: IndexOfAccount) -> Result<(), InstructionError> { - *self - .touched_flags - .borrow_mut() - .get_mut(index as usize) - .ok_or(InstructionError::NotEnoughAccountKeys)? 
= true; - Ok(()) - } - - fn update_accounts_resize_delta( - &self, - old_len: usize, - new_len: usize, - ) -> Result<(), InstructionError> { - let mut accounts_resize_delta = self - .resize_delta - .try_borrow_mut() - .map_err(|_| InstructionError::GenericError)?; - *accounts_resize_delta = - accounts_resize_delta.saturating_add((new_len as i64).saturating_sub(old_len as i64)); - Ok(()) - } - - fn can_data_be_resized(&self, old_len: usize, new_len: usize) -> Result<(), InstructionError> { - // The new length can not exceed the maximum permitted length - if new_len > MAX_PERMITTED_DATA_LENGTH as usize { - return Err(InstructionError::InvalidRealloc); - } - // The resize can not exceed the per-transaction maximum - let length_delta = (new_len as i64).saturating_sub(old_len as i64); - if self - .resize_delta - .try_borrow() - .map_err(|_| InstructionError::GenericError) - .map(|value_ref| *value_ref)? - .saturating_add(length_delta) - > MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION - { - return Err(InstructionError::MaxAccountsDataAllocationsExceeded); - } - Ok(()) - } - - pub fn try_borrow( - &self, - index: IndexOfAccount, - ) -> Result, InstructionError> { - self.accounts - .get(index as usize) - .ok_or(InstructionError::MissingAccount)? - .try_borrow() - .map_err(|_| InstructionError::AccountBorrowFailed) - } -} - /// Loaded transaction shared between runtime and programs. /// /// This context is valid for the entire duration of a transaction being processed. 
-#[derive(Debug, Clone, PartialEq)] +#[derive(Debug)] pub struct TransactionContext { - account_keys: Pin>, accounts: Rc, instruction_stack_capacity: usize, instruction_trace_capacity: usize, instruction_stack: Vec, - instruction_trace: Vec, + instruction_trace: Vec, top_level_instruction_index: usize, return_data: TransactionReturnData, #[cfg(not(target_os = "solana"))] - remove_accounts_executable_flag_checks: bool, - #[cfg(not(target_os = "solana"))] rent: Rent, } @@ -215,27 +129,21 @@ impl TransactionContext { instruction_stack_capacity: usize, instruction_trace_capacity: usize, ) -> Self { - let (account_keys, accounts): (Vec<_>, Vec<_>) = transaction_accounts - .into_iter() - .map(|(key, account)| (key, RefCell::new(account))) - .unzip(); Self { - account_keys: Pin::new(account_keys.into_boxed_slice()), - accounts: Rc::new(TransactionAccounts::new(accounts)), + accounts: Rc::new(TransactionAccounts::new(transaction_accounts)), instruction_stack_capacity, instruction_trace_capacity, instruction_stack: Vec::with_capacity(instruction_stack_capacity), - instruction_trace: vec![InstructionContext::default()], + instruction_trace: vec![InstructionFrame::default()], top_level_instruction_index: 0, return_data: TransactionReturnData::default(), - remove_accounts_executable_flag_checks: true, rent, } } - #[cfg(not(target_os = "solana"))] - pub fn set_remove_accounts_executable_flag_checks(&mut self, enabled: bool) { - self.remove_accounts_executable_flag_checks = enabled; + #[cfg(feature = "dev-context-only-utils")] + pub fn set_top_level_instruction_index(&mut self, top_level_instruction_index: usize) { + self.top_level_instruction_index = top_level_instruction_index; } /// Used in mock_process_instruction @@ -245,11 +153,14 @@ impl TransactionContext { return Err(InstructionError::CallDepth); } - Ok(Rc::try_unwrap(self.accounts) + let (accounts, _, _) = Rc::try_unwrap(self.accounts) .expect("transaction_context.accounts has unexpected outstanding refs") - .accounts 
+ .take(); + + Ok(UnsafeCell::into_inner(accounts) + .into_vec() .into_iter() - .map(RefCell::into_inner) + .map(|(_, account)| account) .collect()) } @@ -268,69 +179,56 @@ impl TransactionContext { &self, index_in_transaction: IndexOfAccount, ) -> Result<&Pubkey, InstructionError> { - self.account_keys - .get(index_in_transaction as usize) - .ok_or(InstructionError::NotEnoughAccountKeys) - } - - /// Searches for an account by its key - #[cfg(all( - not(target_os = "solana"), - any(test, feature = "dev-context-only-utils") - ))] - pub fn get_account_at_index( - &self, - index_in_transaction: IndexOfAccount, - ) -> Result<&RefCell, InstructionError> { self.accounts - .get(index_in_transaction) - .ok_or(InstructionError::NotEnoughAccountKeys) + .account_key(index_in_transaction) + .ok_or(InstructionError::MissingAccount) } /// Searches for an account by its key pub fn find_index_of_account(&self, pubkey: &Pubkey) -> Option { - self.account_keys - .iter() + self.accounts + .account_keys_iter() .position(|key| key == pubkey) .map(|index| index as IndexOfAccount) } - /// Searches for a program account by its key - pub fn find_index_of_program_account(&self, pubkey: &Pubkey) -> Option { - self.account_keys - .iter() - .rposition(|key| key == pubkey) - .map(|index| index as IndexOfAccount) - } - - /// Gets the max length of the InstructionContext trace + /// Gets the max length of the instruction trace pub fn get_instruction_trace_capacity(&self) -> usize { self.instruction_trace_capacity } /// Returns the instruction trace length. /// - /// Not counting the last empty InstructionContext which is always pre-reserved for the next instruction. + /// Not counting the last empty instruction which is always pre-reserved for the next instruction. /// See also `get_next_instruction_context()`. 
pub fn get_instruction_trace_length(&self) -> usize { self.instruction_trace.len().saturating_sub(1) } - /// Gets an InstructionContext by its index in the trace + /// Gets a view on an instruction by its index in the trace pub fn get_instruction_context_at_index_in_trace( &self, index_in_trace: usize, - ) -> Result<&InstructionContext, InstructionError> { - self.instruction_trace + ) -> Result { + let instruction = self + .instruction_trace .get(index_in_trace) - .ok_or(InstructionError::CallDepth) + .ok_or(InstructionError::CallDepth)?; + Ok(InstructionContext { + transaction_context: self, + nesting_level: instruction.nesting_level, + program_account_index_in_tx: instruction.program_account_index_in_tx, + instruction_accounts: &instruction.instruction_accounts, + dedup_map: &instruction.dedup_map, + instruction_data: &instruction.instruction_data, + }) } - /// Gets an InstructionContext by its nesting level in the stack + /// Gets a view on the instruction by its nesting level in the stack pub fn get_instruction_context_at_nesting_level( &self, nesting_level: usize, - ) -> Result<&InstructionContext, InstructionError> { + ) -> Result { let index_in_trace = *self .instruction_stack .get(nesting_level) @@ -340,90 +238,110 @@ impl TransactionContext { Ok(instruction_context) } - /// Gets the max height of the InstructionContext stack + /// Gets the max height of the instruction stack pub fn get_instruction_stack_capacity(&self) -> usize { self.instruction_stack_capacity } /// Gets instruction stack height, top-level instructions are height /// `solana_instruction::TRANSACTION_LEVEL_STACK_HEIGHT` - pub fn get_instruction_context_stack_height(&self) -> usize { + pub fn get_instruction_stack_height(&self) -> usize { self.instruction_stack.len() } - /// Returns the current InstructionContext - pub fn get_current_instruction_context(&self) -> Result<&InstructionContext, InstructionError> { + /// Returns a view on the current instruction + pub fn 
get_current_instruction_context(&self) -> Result { let level = self - .get_instruction_context_stack_height() + .get_instruction_stack_height() .checked_sub(1) .ok_or(InstructionError::CallDepth)?; self.get_instruction_context_at_nesting_level(level) } - /// Returns the mutable InstructionContext to configure for the next invocation. + /// Returns a view on the next instruction. This function assumes it has already been + /// configured with the correct values in `prepare_next_instruction` or + /// `prepare_next_top_level_instruction` + pub fn get_next_instruction_context(&self) -> Result { + let index_in_trace = self + .instruction_trace + .len() + .checked_sub(1) + .ok_or(InstructionError::CallDepth)?; + self.get_instruction_context_at_index_in_trace(index_in_trace) + } + + /// Configures the next instruction. /// /// The last InstructionContext is always empty and pre-reserved for the next instruction. - pub fn get_next_instruction_context_mut( + pub fn configure_next_instruction( &mut self, - ) -> Result<&mut InstructionContext, InstructionError> { - self.instruction_trace + program_index: IndexOfAccount, + instruction_accounts: Vec, + deduplication_map: Vec, + instruction_data: &[u8], + ) -> Result<(), InstructionError> { + debug_assert_eq!(deduplication_map.len(), MAX_ACCOUNTS_PER_TRANSACTION); + let instruction = self + .instruction_trace .last_mut() - .ok_or(InstructionError::CallDepth) + .ok_or(InstructionError::CallDepth)?; + instruction.program_account_index_in_tx = program_index; + instruction.instruction_accounts = instruction_accounts; + instruction.instruction_data = instruction_data.to_vec(); + instruction.dedup_map = deduplication_map; + Ok(()) } - /// Returns the immutable InstructionContext. 
This function assumes it has already been - /// configured with the correct values in `prepare_next_instruction` or - /// `prepare_next_top_level_instruction` - pub fn get_next_instruction_context(&self) -> Result<&InstructionContext, InstructionError> { - self.instruction_trace - .last() - .ok_or(InstructionError::CallDepth) + /// A version of `configure_next_instruction` to help creating the deduplication map in tests + pub fn configure_next_instruction_for_tests( + &mut self, + program_index: IndexOfAccount, + instruction_accounts: Vec, + instruction_data: &[u8], + ) -> Result<(), InstructionError> { + debug_assert!(instruction_accounts.len() <= u8::MAX as usize); + let mut dedup_map = vec![u8::MAX; MAX_ACCOUNTS_PER_TRANSACTION]; + for (idx, account) in instruction_accounts.iter().enumerate() { + let index_in_instruction = dedup_map + .get_mut(account.index_in_transaction as usize) + .unwrap(); + if *index_in_instruction == u8::MAX { + *index_in_instruction = idx as u8; + } + } + self.configure_next_instruction( + program_index, + instruction_accounts, + dedup_map, + instruction_data, + ) } - /// Pushes the next InstructionContext + /// Pushes the next instruction #[cfg(not(target_os = "solana"))] pub fn push(&mut self) -> Result<(), InstructionError> { - let nesting_level = self.get_instruction_context_stack_height(); - let caller_instruction_context = self - .instruction_trace - .last() - .ok_or(InstructionError::CallDepth)?; - let callee_instruction_accounts_lamport_sum = - self.instruction_accounts_lamport_sum(caller_instruction_context)?; - if !self.instruction_stack.is_empty() { - let caller_instruction_context = self.get_current_instruction_context()?; - let original_caller_instruction_accounts_lamport_sum = - caller_instruction_context.instruction_accounts_lamport_sum; - let current_caller_instruction_accounts_lamport_sum = - self.instruction_accounts_lamport_sum(caller_instruction_context)?; - if original_caller_instruction_accounts_lamport_sum - != 
current_caller_instruction_accounts_lamport_sum - { - return Err(InstructionError::UnbalancedInstruction); - } + let nesting_level = self.get_instruction_stack_height(); + if !self.instruction_stack.is_empty() && self.accounts.get_lamports_delta() != 0 { + return Err(InstructionError::UnbalancedInstruction); } { - let instruction_context = self.get_next_instruction_context_mut()?; - instruction_context.nesting_level = nesting_level; - instruction_context.instruction_accounts_lamport_sum = - callee_instruction_accounts_lamport_sum; + let instruction = self + .instruction_trace + .last_mut() + .ok_or(InstructionError::CallDepth)?; + instruction.nesting_level = nesting_level; } let index_in_trace = self.get_instruction_trace_length(); if index_in_trace >= self.instruction_trace_capacity { return Err(InstructionError::MaxInstructionTraceLengthExceeded); } - self.instruction_trace.push(InstructionContext::default()); + self.instruction_trace.push(InstructionFrame::default()); if nesting_level >= self.instruction_stack_capacity { return Err(InstructionError::CallDepth); } self.instruction_stack.push(index_in_trace); if let Some(index_in_transaction) = self.find_index_of_account(&instructions::id()) { - let mut mut_account_ref = self - .accounts - .get(index_in_transaction) - .ok_or(InstructionError::NotEnoughAccountKeys)? 
- .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowFailed)?; + let mut mut_account_ref = self.accounts.try_borrow_mut(index_in_transaction)?; if mut_account_ref.owner() != &solana_sdk_ids::sysvar::id() { return Err(InstructionError::InvalidAccountOwner); } @@ -435,7 +353,7 @@ impl TransactionContext { Ok(()) } - /// Pops the current InstructionContext + /// Pops the current instruction #[cfg(not(target_os = "solana"))] pub fn pop(&mut self) -> Result<(), InstructionError> { if self.instruction_stack.is_empty() { @@ -446,18 +364,18 @@ impl TransactionContext { self.get_current_instruction_context() .and_then(|instruction_context| { // Verify all executable accounts have no outstanding refs - for index_in_transaction in instruction_context.program_accounts.iter() { - self.accounts - .get(*index_in_transaction) - .ok_or(InstructionError::NotEnoughAccountKeys)? - .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowOutstanding)?; - } - self.instruction_accounts_lamport_sum(instruction_context) - .map(|instruction_accounts_lamport_sum| { - instruction_context.instruction_accounts_lamport_sum - != instruction_accounts_lamport_sum - }) + self.accounts + .try_borrow_mut( + instruction_context.get_index_of_program_account_in_transaction()?, + ) + .map_err(|err| { + if err == InstructionError::AccountBorrowFailed { + InstructionError::AccountBorrowOutstanding + } else { + err + } + })?; + Ok(self.accounts.get_lamports_delta() != 0) }); // Always pop, even if we `detected_an_unbalanced_instruction` self.instruction_stack.pop(); @@ -471,12 +389,12 @@ impl TransactionContext { } } - /// Gets the return data of the current InstructionContext or any above + /// Gets the return data of the current instruction or any above pub fn get_return_data(&self) -> (&Pubkey, &[u8]) { (&self.return_data.program_id, &self.return_data.data) } - /// Set the return data of the current InstructionContext + /// Set the return data of the current instruction pub fn 
set_return_data( &mut self, program_id: Pubkey, @@ -486,45 +404,6 @@ impl TransactionContext { Ok(()) } - /// Calculates the sum of all lamports within an instruction - #[cfg(not(target_os = "solana"))] - fn instruction_accounts_lamport_sum( - &self, - instruction_context: &InstructionContext, - ) -> Result { - let mut instruction_accounts_lamport_sum: u128 = 0; - for instruction_account_index in 0..instruction_context.get_number_of_instruction_accounts() - { - if instruction_context - .is_instruction_account_duplicate(instruction_account_index)? - .is_some() - { - continue; // Skip duplicate account - } - let index_in_transaction = instruction_context - .get_index_of_instruction_account_in_transaction(instruction_account_index)?; - instruction_accounts_lamport_sum = (self - .accounts - .get(index_in_transaction) - .ok_or(InstructionError::NotEnoughAccountKeys)? - .try_borrow() - .map_err(|_| InstructionError::AccountBorrowOutstanding)? - .lamports() as u128) - .checked_add(instruction_accounts_lamport_sum) - .ok_or(InstructionError::ArithmeticOverflow)?; - } - Ok(instruction_accounts_lamport_sum) - } - - /// Returns the accounts resize delta - pub fn accounts_resize_delta(&self) -> Result { - self.accounts - .resize_delta - .try_borrow() - .map_err(|_| InstructionError::GenericError) - .map(|value_ref| *value_ref) - } - /// Returns a new account data write access handler pub fn access_violation_handler( &self, @@ -555,11 +434,7 @@ impl TransactionContext { // The four calls below can't really fail. If they fail because of a bug, // whatever is writing will trigger an EbpfError::AccessViolation like // if the region was readonly, and the transaction will fail gracefully. 
- let Some(account) = accounts.accounts.get(index_in_transaction as usize) else { - debug_assert!(false); - return; - }; - let Ok(mut account) = account.try_borrow_mut() else { + let Ok(mut account) = accounts.try_borrow_mut(index_in_transaction) else { debug_assert!(false); return; }; @@ -567,16 +442,10 @@ impl TransactionContext { debug_assert!(false); return; } - let Ok(remaining_allowed_growth) = - accounts.resize_delta.try_borrow().map(|resize_delta| { - MAX_PERMITTED_ACCOUNTS_DATA_ALLOCATIONS_PER_TRANSACTION - .saturating_sub(*resize_delta) - .max(0) as usize - }) - else { - debug_assert!(false); - return; - }; + + let remaining_allowed_growth = MAX_ACCOUNT_DATA_GROWTH_PER_TRANSACTION + .saturating_sub(accounts.resize_delta()) + .max(0) as usize; if requested_length > region.len as usize { // Realloc immediately here to fit the requested access, @@ -584,7 +453,7 @@ impl TransactionContext { // account length the program stored in AccountInfo. let old_len = account.data().len(); let new_len = (address_space_reserved_for_account as usize) - .min(MAX_PERMITTED_DATA_LENGTH as usize) + .min(MAX_ACCOUNT_DATA_LEN as usize) .min(old_len.saturating_add(remaining_allowed_growth)); // The last two min operations ensure the following: debug_assert!(accounts.can_data_be_resized(old_len, new_len).is_ok()); @@ -619,32 +488,32 @@ pub struct TransactionReturnData { pub data: Vec, } -/// Loaded instruction shared between runtime and programs. -/// -/// This context is valid for the entire duration of a (possibly cross program) instruction being processed. -#[derive(Debug, Clone, Default, Eq, PartialEq)] -pub struct InstructionContext { +/// Instruction shared between runtime and programs. 
+#[derive(Debug, Clone, Default)] +pub struct InstructionFrame { nesting_level: usize, - instruction_accounts_lamport_sum: u128, - program_accounts: Vec, + program_account_index_in_tx: IndexOfAccount, instruction_accounts: Vec, + /// This is an account deduplication map that maps index_in_transaction to index_in_instruction + /// Usage: dedup_map[index_in_transaction] = index_in_instruction + /// This is a vector of u8s to save memory, since many entries may be unused. + dedup_map: Vec, instruction_data: Vec, } -impl InstructionContext { - /// Used together with TransactionContext::get_next_instruction_context() - #[cfg(not(target_os = "solana"))] - pub fn configure( - &mut self, - program_accounts: Vec, - instruction_accounts: Vec, - instruction_data: &[u8], - ) { - self.program_accounts = program_accounts; - self.instruction_accounts = instruction_accounts; - self.instruction_data = instruction_data.to_vec(); - } +/// View interface to read instructions. +#[derive(Debug, Clone)] +pub struct InstructionContext<'a> { + transaction_context: &'a TransactionContext, + // The rest of the fields are redundant shortcuts + nesting_level: usize, + program_account_index_in_tx: IndexOfAccount, + instruction_accounts: &'a [InstructionAccount], + dedup_map: &'a [u8], + instruction_data: &'a [u8], +} +impl<'a> InstructionContext<'a> { /// How many Instructions were on the stack after this one was pushed /// /// That is the number of nested parent Instructions plus one (itself). 
@@ -652,11 +521,6 @@ impl InstructionContext { self.nesting_level.saturating_add(1) } - /// Number of program accounts - pub fn get_number_of_program_accounts(&self) -> IndexOfAccount { - self.program_accounts.len() as IndexOfAccount - } - /// Number of accounts in this Instruction (without program accounts) pub fn get_number_of_instruction_accounts(&self) -> IndexOfAccount { self.instruction_accounts.len() as IndexOfAccount @@ -668,7 +532,7 @@ impl InstructionContext { expected_at_least: IndexOfAccount, ) -> Result<(), InstructionError> { if self.get_number_of_instruction_accounts() < expected_at_least { - Err(InstructionError::NotEnoughAccountKeys) + Err(InstructionError::MissingAccount) } else { Ok(()) } @@ -676,52 +540,18 @@ impl InstructionContext { /// Data parameter for the programs `process_instruction` handler pub fn get_instruction_data(&self) -> &[u8] { - &self.instruction_data - } - - /// Searches for a program account by its key - pub fn find_index_of_program_account( - &self, - transaction_context: &TransactionContext, - pubkey: &Pubkey, - ) -> Option { - self.program_accounts - .iter() - .position(|index_in_transaction| { - transaction_context - .account_keys - .get(*index_in_transaction as usize) - == Some(pubkey) - }) - .map(|index| index as IndexOfAccount) - } - - /// Searches for an instruction account by its key - pub fn find_index_of_instruction_account( - &self, - transaction_context: &TransactionContext, - pubkey: &Pubkey, - ) -> Option { - self.instruction_accounts - .iter() - .position(|instruction_account| { - transaction_context - .account_keys - .get(instruction_account.index_in_transaction as usize) - == Some(pubkey) - }) - .map(|index| index as IndexOfAccount) + self.instruction_data } /// Translates the given instruction wide program_account_index into a transaction wide index pub fn get_index_of_program_account_in_transaction( &self, - program_account_index: IndexOfAccount, ) -> Result { - Ok(*self - .program_accounts - 
.get(program_account_index as usize) - .ok_or(InstructionError::NotEnoughAccountKeys)?) + if self.program_account_index_in_tx == u16::MAX { + Err(InstructionError::MissingAccount) + } else { + Ok(self.program_account_index_in_tx) + } } /// Translates the given instruction wide instruction_account_index into a transaction wide index @@ -732,7 +562,7 @@ impl InstructionContext { Ok(self .instruction_accounts .get(instruction_account_index as usize) - .ok_or(InstructionError::NotEnoughAccountKeys)? + .ok_or(InstructionError::MissingAccount)? .index_in_transaction as IndexOfAccount) } @@ -741,10 +571,15 @@ impl InstructionContext { &self, index_in_transaction: IndexOfAccount, ) -> Result { - self.instruction_accounts - .iter() - .position(|account| account.index_in_transaction == index_in_transaction) - .map(|idx| idx as IndexOfAccount) + self.dedup_map + .get(index_in_transaction as usize) + .and_then(|idx| { + if *idx as usize >= self.instruction_accounts.len() { + None + } else { + Some(*idx as IndexOfAccount) + } + }) .ok_or(InstructionError::MissingAccount) } @@ -754,89 +589,61 @@ impl InstructionContext { &self, instruction_account_index: IndexOfAccount, ) -> Result, InstructionError> { - let index_in_callee = self - .instruction_accounts - .get(instruction_account_index as usize) - .ok_or(InstructionError::NotEnoughAccountKeys)? 
- .index_in_callee; - Ok(if index_in_callee == instruction_account_index { - None - } else { - Some(index_in_callee) - }) + let index_in_transaction = + self.get_index_of_instruction_account_in_transaction(instruction_account_index)?; + let first_instruction_account_index = + self.get_index_of_account_in_instruction(index_in_transaction)?; + + Ok( + if first_instruction_account_index == instruction_account_index { + None + } else { + Some(first_instruction_account_index) + }, + ) } /// Gets the key of the last program account of this Instruction - pub fn get_last_program_key<'a, 'b: 'a>( - &'a self, - transaction_context: &'b TransactionContext, - ) -> Result<&'b Pubkey, InstructionError> { - self.get_index_of_program_account_in_transaction( - self.get_number_of_program_accounts().saturating_sub(1), - ) - .and_then(|index_in_transaction| { - transaction_context.get_key_of_account_at_index(index_in_transaction) - }) + pub fn get_program_key(&self) -> Result<&'a Pubkey, InstructionError> { + self.get_index_of_program_account_in_transaction() + .and_then(|index_in_transaction| { + self.transaction_context + .get_key_of_account_at_index(index_in_transaction) + }) } - fn try_borrow_account<'a, 'b: 'a>( - &'a self, - transaction_context: &'b TransactionContext, - index_in_transaction: IndexOfAccount, - index_in_instruction: Option, - ) -> Result, InstructionError> { - let account = transaction_context - .accounts - .get(index_in_transaction) - .ok_or(InstructionError::MissingAccount)? 
- .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowFailed)?; - Ok(BorrowedAccount { - transaction_context, - instruction_context: self, - index_in_transaction, - index_in_instruction_accounts: index_in_instruction, - account, - }) + /// Get the owner of the program account of this instruction + pub fn get_program_owner(&self) -> Result { + self.get_index_of_program_account_in_transaction() + .and_then(|index_in_transaction| { + self.transaction_context + .accounts + .try_borrow(index_in_transaction) + }) + .map(|acc| *acc.owner()) } - /// Gets the last program account of this Instruction - pub fn try_borrow_last_program_account<'a, 'b: 'a>( - &'a self, - transaction_context: &'b TransactionContext, - ) -> Result, InstructionError> { - let result = self.try_borrow_program_account( - transaction_context, - self.get_number_of_program_accounts().saturating_sub(1), - ); - debug_assert!(result.is_ok()); - result - } + /// Gets an instruction account of this Instruction + pub fn try_borrow_instruction_account( + &self, + index_in_instruction: IndexOfAccount, + ) -> Result { + let instruction_account = *self + .instruction_accounts + .get(index_in_instruction as usize) + .ok_or(InstructionError::MissingAccount)?; - /// Tries to borrow a program account from this Instruction - pub fn try_borrow_program_account<'a, 'b: 'a>( - &'a self, - transaction_context: &'b TransactionContext, - program_account_index: IndexOfAccount, - ) -> Result, InstructionError> { - let index_in_transaction = - self.get_index_of_program_account_in_transaction(program_account_index)?; - self.try_borrow_account(transaction_context, index_in_transaction, None) - } + let account = self + .transaction_context + .accounts + .try_borrow_mut(instruction_account.index_in_transaction)?; - /// Gets an instruction account of this Instruction - pub fn try_borrow_instruction_account<'a, 'b: 'a>( - &'a self, - transaction_context: &'b TransactionContext, - instruction_account_index: IndexOfAccount, 
- ) -> Result, InstructionError> { - let index_in_transaction = - self.get_index_of_instruction_account_in_transaction(instruction_account_index)?; - self.try_borrow_account( - transaction_context, - index_in_transaction, - Some(instruction_account_index), - ) + Ok(BorrowedInstructionAccount { + transaction_context: self.transaction_context, + instruction_account, + account, + index_in_transaction_of_instruction_program: self.program_account_index_in_tx, + }) } /// Returns whether an instruction account is a signer @@ -864,15 +671,13 @@ impl InstructionContext { } /// Calculates the set of all keys of signer instruction accounts in this Instruction - pub fn get_signers( - &self, - transaction_context: &TransactionContext, - ) -> Result, InstructionError> { + pub fn get_signers(&self) -> Result, InstructionError> { let mut result = HashSet::new(); for instruction_account in self.instruction_accounts.iter() { if instruction_account.is_signer() { result.insert( - *transaction_context + *self + .transaction_context .get_key_of_account_at_index(instruction_account.index_in_transaction)?, ); } @@ -881,38 +686,39 @@ impl InstructionContext { } pub fn instruction_accounts(&self) -> &[InstructionAccount] { - &self.instruction_accounts + self.instruction_accounts + } + + pub fn get_key_of_instruction_account( + &self, + index_in_instruction: IndexOfAccount, + ) -> Result<&'a Pubkey, InstructionError> { + self.get_index_of_instruction_account_in_transaction(index_in_instruction) + .and_then(|idx| self.transaction_context.get_key_of_account_at_index(idx)) } } /// Shared account borrowed from the TransactionContext and an InstructionContext. 
#[derive(Debug)] -pub struct BorrowedAccount<'a> { +pub struct BorrowedInstructionAccount<'a> { transaction_context: &'a TransactionContext, - instruction_context: &'a InstructionContext, - index_in_transaction: IndexOfAccount, - // Program accounts are not part of the instruction_accounts vector, and thus None - index_in_instruction_accounts: Option, - account: RefMut<'a, AccountSharedData>, + account: AccountRefMut<'a>, + instruction_account: InstructionAccount, + index_in_transaction_of_instruction_program: IndexOfAccount, } -impl BorrowedAccount<'_> { - /// Returns the transaction context - pub fn transaction_context(&self) -> &TransactionContext { - self.transaction_context - } - +impl BorrowedInstructionAccount<'_> { /// Returns the index of this account (transaction wide) #[inline] pub fn get_index_in_transaction(&self) -> IndexOfAccount { - self.index_in_transaction + self.instruction_account.index_in_transaction } /// Returns the public key of this account (transaction wide) #[inline] pub fn get_key(&self) -> &Pubkey { self.transaction_context - .get_key_of_account_at_index(self.index_in_transaction) + .get_key_of_account_at_index(self.instruction_account.index_in_transaction) .unwrap() } @@ -933,10 +739,6 @@ impl BorrowedAccount<'_> { if !self.is_writable() { return Err(InstructionError::ModifiedProgramId); } - // and only if the account is not executable - if self.is_executable_internal() { - return Err(InstructionError::ModifiedProgramId); - } // and only if the data is zero-initialized or empty if !is_zeroed(self.get_data()) { return Err(InstructionError::ModifiedProgramId); @@ -967,14 +769,17 @@ impl BorrowedAccount<'_> { if !self.is_writable() { return Err(InstructionError::ReadonlyLamportChange); } - // The balance of executable accounts may not change - if self.is_executable_internal() { - return Err(InstructionError::ExecutableLamportChange); - } // don't touch the account if the lamports do not change - if self.get_lamports() == lamports { + let 
old_lamports = self.get_lamports(); + if old_lamports == lamports { return Ok(()); } + + let lamports_balance = (lamports as i128).saturating_sub(old_lamports as i128); + self.transaction_context + .accounts + .add_lamports_delta(lamports_balance)?; + self.touch()?; self.account.set_lamports(lamports); Ok(()) @@ -1015,24 +820,6 @@ impl BorrowedAccount<'_> { Ok(self.account.data_as_mut_slice()) } - /// Overwrites the account data and size (transaction wide). - /// - /// You should always prefer set_data_from_slice(). Calling this method is - /// currently safe but requires some special casing during CPI when direct - /// account mapping is enabled. - #[cfg(all( - not(target_os = "solana"), - any(test, feature = "dev-context-only-utils") - ))] - pub fn set_data(&mut self, data: Vec) -> Result<(), InstructionError> { - self.can_data_be_resized(data.len())?; - self.touch()?; - - self.update_accounts_resize_delta(data.len())?; - self.account.set_data(data); - Ok(()) - } - /// Overwrites the account data and size (transaction wide). /// /// Call this when you have a slice of data you do not own and want to @@ -1103,23 +890,19 @@ impl BorrowedAccount<'_> { fn make_data_mut(&mut self) { // if the account is still shared, it means this is the first time we're // about to write into it. Make the account mutable by copying it in a - // buffer with MAX_PERMITTED_DATA_INCREASE capacity so that if the + // buffer with MAX_ACCOUNT_DATA_GROWTH_PER_INSTRUCTION capacity so that if the // transaction reallocs, we don't have to copy the whole account data a // second time to fullfill the realloc. - // - // NOTE: The account memory region CoW code in bpf_loader::create_vm() implements the same - // logic and must be kept in sync. 
if self.account.is_shared() { - self.account.reserve(MAX_PERMITTED_DATA_INCREASE); + self.account + .reserve(MAX_ACCOUNT_DATA_GROWTH_PER_INSTRUCTION); } } /// Deserializes the account data into a state #[cfg(all(not(target_os = "solana"), feature = "bincode"))] pub fn get_state(&self) -> Result { - self.account - .deserialize_data() - .map_err(|_| InstructionError::InvalidAccountData) + bincode::deserialize(self.account.data()).map_err(|_| InstructionError::InvalidAccountData) } /// Serializes a state into the account data @@ -1151,16 +934,6 @@ impl BorrowedAccount<'_> { self.account.executable() } - /// Feature gating to remove `is_executable` flag related checks - #[cfg(not(target_os = "solana"))] - #[inline] - fn is_executable_internal(&self) -> bool { - !self - .transaction_context - .remove_accounts_executable_flag_checks - && self.account.executable() - } - /// Configures whether this account is executable (transaction wide) #[cfg(not(target_os = "solana"))] pub fn set_executable(&mut self, is_executable: bool) -> Result<(), InstructionError> { @@ -1180,10 +953,6 @@ impl BorrowedAccount<'_> { if !self.is_writable() { return Err(InstructionError::ExecutableModified); } - // one can not clear the executable flag - if self.is_executable_internal() && !is_executable { - return Err(InstructionError::ExecutableModified); - } // don't touch the account if the executable flag does not change #[allow(deprecated)] if self.is_executable() == is_executable { @@ -1203,41 +972,25 @@ impl BorrowedAccount<'_> { /// Returns whether this account is a signer (instruction wide) pub fn is_signer(&self) -> bool { - if let Some(index_in_instruction_accounts) = self.index_in_instruction_accounts { - self.instruction_context - .is_instruction_account_signer(index_in_instruction_accounts) - .unwrap_or_default() - } else { - false - } + self.instruction_account.is_signer() } /// Returns whether this account is writable (instruction wide) pub fn is_writable(&self) -> bool { - if let 
Some(index_in_instruction_accounts) = self.index_in_instruction_accounts { - self.instruction_context - .is_instruction_account_writable(index_in_instruction_accounts) - .unwrap_or_default() - } else { - false - } + self.instruction_account.is_writable() } /// Returns true if the owner of this account is the current `InstructionContext`s last program (instruction wide) pub fn is_owned_by_current_program(&self) -> bool { - self.instruction_context - .get_last_program_key(self.transaction_context) - .map(|key| key == self.get_owner()) + self.transaction_context + .get_key_of_account_at_index(self.index_in_transaction_of_instruction_program) + .map(|program_key| program_key == self.get_owner()) .unwrap_or_default() } /// Returns an error if the account data can not be mutated by the current program #[cfg(not(target_os = "solana"))] pub fn can_data_be_changed(&self) -> Result<(), InstructionError> { - // Only non-executable accounts data can be changed - if self.is_executable_internal() { - return Err(InstructionError::ExecutableDataModified); - } // and only if the account is writable if !self.is_writable() { return Err(InstructionError::ReadonlyDataModified); @@ -1267,7 +1020,7 @@ impl BorrowedAccount<'_> { fn touch(&self) -> Result<(), InstructionError> { self.transaction_context .accounts - .touch(self.index_in_transaction) + .touch(self.instruction_account.index_in_transaction) } #[cfg(not(target_os = "solana"))] @@ -1291,27 +1044,20 @@ pub struct ExecutionRecord { #[cfg(not(target_os = "solana"))] impl From for ExecutionRecord { fn from(context: TransactionContext) -> Self { - let TransactionAccounts { - accounts, - touched_flags, - resize_delta, - } = Rc::try_unwrap(context.accounts) - .expect("transaction_context.accounts has unexpected outstanding refs"); - let accounts = Vec::from(Pin::into_inner(context.account_keys)) - .into_iter() - .zip(accounts.into_iter().map(RefCell::into_inner)) - .collect(); + let (accounts, touched_flags, resize_delta) = 
Rc::try_unwrap(context.accounts) + .expect("transaction_context.accounts has unexpected outstanding refs") + .take(); + let accounts = UnsafeCell::into_inner(accounts).into_vec(); let touched_account_count = touched_flags - .borrow() .iter() .fold(0usize, |accumulator, was_touched| { - accumulator.saturating_add(*was_touched as usize) + accumulator.saturating_add(was_touched.get() as usize) }) as u64; Self { accounts, return_data: context.return_data, touched_account_count, - accounts_resize_delta: RefCell::into_inner(resize_delta), + accounts_resize_delta: Cell::into_inner(resize_delta), } } } @@ -1374,4 +1120,35 @@ mod tests { ); assert_eq!(build_transaction_context(account).push(), Ok(()),); } + + #[test] + fn test_invalid_native_loader_index() { + let mut transaction_context = TransactionContext::new( + vec![( + Pubkey::new_unique(), + AccountSharedData::new(1, 1, &Pubkey::new_unique()), + )], + Rent::default(), + 20, + 20, + ); + + transaction_context + .configure_next_instruction_for_tests( + u16::MAX, + vec![InstructionAccount::new(0, false, false)], + &[], + ) + .unwrap(); + let instruction_context = transaction_context.get_next_instruction_context().unwrap(); + + let result = instruction_context.get_index_of_program_account_in_transaction(); + assert_eq!(result, Err(InstructionError::MissingAccount)); + + let result = instruction_context.get_program_key(); + assert_eq!(result, Err(InstructionError::MissingAccount)); + + let result = instruction_context.get_program_owner(); + assert_eq!(result.err(), Some(InstructionError::MissingAccount)); + } } diff --git a/transaction-context/src/transaction_accounts.rs b/transaction-context/src/transaction_accounts.rs new file mode 100644 index 00000000000000..93509c3ddccd32 --- /dev/null +++ b/transaction-context/src/transaction_accounts.rs @@ -0,0 +1,390 @@ +#[cfg(feature = "dev-context-only-utils")] +use qualifier_attr::qualifiers; +use { + crate::{IndexOfAccount, MAX_ACCOUNT_DATA_GROWTH_PER_TRANSACTION, 
MAX_ACCOUNT_DATA_LEN}, + solana_account::AccountSharedData, + solana_instruction::error::InstructionError, + solana_pubkey::Pubkey, + std::{ + cell::{Cell, UnsafeCell}, + ops::{Deref, DerefMut}, + }, +}; + +/// An account key and the matching account +pub type TransactionAccount = (Pubkey, AccountSharedData); +pub(crate) type OwnedTransactionAccounts = ( + UnsafeCell>, + Box<[Cell]>, + Cell, +); + +#[derive(Debug)] +pub struct TransactionAccounts { + accounts: UnsafeCell>, + borrow_counters: Box<[BorrowCounter]>, + touched_flags: Box<[Cell]>, + resize_delta: Cell, + lamports_delta: Cell, +} + +impl TransactionAccounts { + #[cfg(not(target_os = "solana"))] + pub(crate) fn new(accounts: Vec) -> TransactionAccounts { + let touched_flags = vec![Cell::new(false); accounts.len()].into_boxed_slice(); + let borrow_counters = vec![BorrowCounter::default(); accounts.len()].into_boxed_slice(); + let accounts = UnsafeCell::new(accounts.into_boxed_slice()); + TransactionAccounts { + accounts, + borrow_counters, + touched_flags, + resize_delta: Cell::new(0), + lamports_delta: Cell::new(0), + } + } + + pub(crate) fn len(&self) -> usize { + // SAFETY: The borrow is local to this function and is only reading length. + unsafe { (*self.accounts.get()).len() } + } + + #[cfg(not(target_os = "solana"))] + pub fn touch(&self, index: IndexOfAccount) -> Result<(), InstructionError> { + self.touched_flags + .get(index as usize) + .ok_or(InstructionError::MissingAccount)? 
+ .set(true); + Ok(()) + } + + pub(crate) fn update_accounts_resize_delta( + &self, + old_len: usize, + new_len: usize, + ) -> Result<(), InstructionError> { + let accounts_resize_delta = self.resize_delta.get(); + self.resize_delta.set( + accounts_resize_delta.saturating_add((new_len as i64).saturating_sub(old_len as i64)), + ); + Ok(()) + } + + pub(crate) fn can_data_be_resized( + &self, + old_len: usize, + new_len: usize, + ) -> Result<(), InstructionError> { + // The new length can not exceed the maximum permitted length + if new_len > MAX_ACCOUNT_DATA_LEN as usize { + return Err(InstructionError::InvalidRealloc); + } + // The resize can not exceed the per-transaction maximum + let length_delta = (new_len as i64).saturating_sub(old_len as i64); + if self.resize_delta.get().saturating_add(length_delta) + > MAX_ACCOUNT_DATA_GROWTH_PER_TRANSACTION + { + return Err(InstructionError::MaxAccountsDataAllocationsExceeded); + } + Ok(()) + } + + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] + pub(crate) fn try_borrow_mut( + &self, + index: IndexOfAccount, + ) -> Result { + let borrow_counter = self + .borrow_counters + .get(index as usize) + .ok_or(InstructionError::MissingAccount)?; + borrow_counter.try_borrow_mut()?; + + // SAFETY: The borrow counter guarantees this is the only mutable borrow of this account. + // The unwrap is safe because accounts.len() == borrow_counters.len(), so the missing + // account error should have been returned above. + let account = unsafe { &mut (*self.accounts.get()).get_mut(index as usize).unwrap().1 }; + + Ok(AccountRefMut { + account, + borrow_counter, + }) + } + + pub fn try_borrow(&self, index: IndexOfAccount) -> Result { + let borrow_counter = self + .borrow_counters + .get(index as usize) + .ok_or(InstructionError::MissingAccount)?; + borrow_counter.try_borrow()?; + + // SAFETY: The borrow counter guarantees there are no mutable borrow of this account. 
+ // The unwrap is safe because accounts.len() == borrow_counters.len(), so the missing + // account error should have been returned above. + let account = unsafe { &(*self.accounts.get()).get(index as usize).unwrap().1 }; + + Ok(AccountRef { + account, + borrow_counter, + }) + } + + pub(crate) fn add_lamports_delta(&self, balance: i128) -> Result<(), InstructionError> { + let delta = self.lamports_delta.get(); + self.lamports_delta.set( + delta + .checked_add(balance) + .ok_or(InstructionError::ArithmeticOverflow)?, + ); + Ok(()) + } + + pub(crate) fn get_lamports_delta(&self) -> i128 { + self.lamports_delta.get() + } + + pub(crate) fn take(self) -> OwnedTransactionAccounts { + (self.accounts, self.touched_flags, self.resize_delta) + } + + pub fn resize_delta(&self) -> i64 { + self.resize_delta.get() + } + + pub(crate) fn account_key(&self, index: IndexOfAccount) -> Option<&Pubkey> { + // SAFETY: We never modify an account key, so returning a reference to it is safe. + unsafe { (*self.accounts.get()).get(index as usize).map(|acc| &acc.0) } + } + + pub(crate) fn account_keys_iter(&self) -> impl Iterator { + // SAFETY: We never modify account keys, so returning an immutable reference to them is safe. 
+ unsafe { (*self.accounts.get()).iter().map(|item| &item.0) } + } +} + +#[derive(Default, Debug, Clone)] +struct BorrowCounter { + counter: Cell, +} + +impl BorrowCounter { + #[inline] + fn is_writing(&self) -> bool { + self.counter.get() < 0 + } + + #[inline] + fn is_reading(&self) -> bool { + self.counter.get() > 0 + } + + #[inline] + fn try_borrow(&self) -> Result<(), InstructionError> { + if self.is_writing() { + return Err(InstructionError::AccountBorrowFailed); + } + + if let Some(counter) = self.counter.get().checked_add(1) { + self.counter.set(counter); + return Ok(()); + } + + Err(InstructionError::AccountBorrowFailed) + } + + #[inline] + fn try_borrow_mut(&self) -> Result<(), InstructionError> { + if self.is_writing() || self.is_reading() { + return Err(InstructionError::AccountBorrowFailed); + } + + self.counter.set(self.counter.get().saturating_sub(1)); + + Ok(()) + } + + #[inline] + fn release_borrow(&self) { + self.counter.set(self.counter.get().saturating_sub(1)); + } + + #[inline] + fn release_borrow_mut(&self) { + self.counter.set(self.counter.get().saturating_add(1)); + } +} + +pub struct AccountRef<'a> { + account: &'a AccountSharedData, + borrow_counter: &'a BorrowCounter, +} + +impl Drop for AccountRef<'_> { + fn drop(&mut self) { + self.borrow_counter.release_borrow(); + } +} + +impl Deref for AccountRef<'_> { + type Target = AccountSharedData; + fn deref(&self) -> &Self::Target { + self.account + } +} + +#[derive(Debug)] +pub struct AccountRefMut<'a> { + account: &'a mut AccountSharedData, + borrow_counter: &'a BorrowCounter, +} + +impl Drop for AccountRefMut<'_> { + fn drop(&mut self) { + self.borrow_counter.release_borrow_mut(); + } +} + +impl Deref for AccountRefMut<'_> { + type Target = AccountSharedData; + fn deref(&self) -> &Self::Target { + self.account + } +} + +impl DerefMut for AccountRefMut<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.account + } +} + +#[cfg(test)] +mod tests { + use { + 
crate::transaction_accounts::TransactionAccounts, solana_account::AccountSharedData, + solana_instruction::error::InstructionError, solana_pubkey::Pubkey, + }; + + #[test] + fn test_missing_account() { + let accounts = vec![ + ( + Pubkey::new_unique(), + AccountSharedData::new(2, 1, &Pubkey::new_unique()), + ), + ( + Pubkey::new_unique(), + AccountSharedData::new(2, 1, &Pubkey::new_unique()), + ), + ]; + + let tx_accounts = TransactionAccounts::new(accounts); + + let res = tx_accounts.try_borrow(3); + assert_eq!(res.err(), Some(InstructionError::MissingAccount)); + + let res = tx_accounts.try_borrow_mut(3); + assert_eq!(res.err(), Some(InstructionError::MissingAccount)); + } + + #[test] + fn test_invalid_borrow() { + let accounts = vec![ + ( + Pubkey::new_unique(), + AccountSharedData::new(2, 1, &Pubkey::new_unique()), + ), + ( + Pubkey::new_unique(), + AccountSharedData::new(2, 1, &Pubkey::new_unique()), + ), + ]; + + let tx_accounts = TransactionAccounts::new(accounts); + + // Two immutable borrows are valid + { + let acc_1 = tx_accounts.try_borrow(0); + assert!(acc_1.is_ok()); + + let acc_2 = tx_accounts.try_borrow(1); + assert!(acc_2.is_ok()); + + let acc_1_new = tx_accounts.try_borrow(0); + assert!(acc_1_new.is_ok()); + + assert_eq!(&*acc_1.unwrap(), &*acc_1_new.unwrap()); + } + + // Two mutable borrows are invalid + { + let acc_1 = tx_accounts.try_borrow_mut(0); + assert!(acc_1.is_ok()); + + let acc_2 = tx_accounts.try_borrow_mut(1); + assert!(acc_2.is_ok()); + + let acc_1_new = tx_accounts.try_borrow_mut(0); + assert_eq!(acc_1_new.err(), Some(InstructionError::AccountBorrowFailed)); + } + + // Mutable after immutable must fail + { + let acc_1 = tx_accounts.try_borrow(0); + assert!(acc_1.is_ok()); + + let acc_2 = tx_accounts.try_borrow(1); + assert!(acc_2.is_ok()); + + let acc_1_new = tx_accounts.try_borrow_mut(0); + assert_eq!(acc_1_new.err(), Some(InstructionError::AccountBorrowFailed)); + } + + // Immutable after mutable must fail + { + let acc_1 = 
tx_accounts.try_borrow_mut(0); + assert!(acc_1.is_ok()); + + let acc_2 = tx_accounts.try_borrow_mut(1); + assert!(acc_2.is_ok()); + + let acc_1_new = tx_accounts.try_borrow(0); + assert_eq!(acc_1_new.err(), Some(InstructionError::AccountBorrowFailed)); + } + + // Different scopes are good + { + let acc_1 = tx_accounts.try_borrow_mut(0); + assert!(acc_1.is_ok()); + } + + { + let acc_1 = tx_accounts.try_borrow_mut(0); + assert!(acc_1.is_ok()); + } + } + + #[test] + fn too_many_borrows() { + let accounts = vec![ + ( + Pubkey::new_unique(), + AccountSharedData::new(2, 1, &Pubkey::new_unique()), + ), + ( + Pubkey::new_unique(), + AccountSharedData::new(2, 1, &Pubkey::new_unique()), + ), + ]; + + let tx_accounts = TransactionAccounts::new(accounts); + let mut borrows = Vec::new(); + for i in 0..129 { + let acc = tx_accounts.try_borrow(1); + if i < 127 { + assert!(acc.is_ok()); + borrows.push(acc.unwrap()); + } else { + assert_eq!(acc.err(), Some(InstructionError::AccountBorrowFailed)); + } + } + } +} diff --git a/transaction-context/src/vm_slice.rs b/transaction-context/src/vm_slice.rs new file mode 100644 index 00000000000000..751a8d48a50380 --- /dev/null +++ b/transaction-context/src/vm_slice.rs @@ -0,0 +1,41 @@ +// The VmSlice class is used for cases when you need a slice that is stored in the BPF +// interpreter's virtual address space. Because this source code can be compiled with +// addresses of different bit depths, we cannot assume that the 64-bit BPF interpreter's +// pointer sizes can be mapped to physical pointer sizes. In particular, if you need a +// slice-of-slices in the virtual space, the inner slices will be different sizes in a +// 32-bit app build than in the 64-bit virtual space. Therefore instead of a slice-of-slices, +// you should implement a slice-of-VmSlices, which can then use VmSlice::translate() to +// map to the physical address. 
+// This class must consist only of 16 bytes: a u64 ptr and a u64 len, to match the 64-bit +// implementation of a slice in Rust. The PhantomData entry takes up 0 bytes. + +use std::marker::PhantomData; + +#[repr(C)] +pub struct VmSlice { + ptr: u64, + len: u64, + resource_type: PhantomData, +} + +impl VmSlice { + pub fn new(ptr: u64, len: u64) -> Self { + VmSlice { + ptr, + len, + resource_type: PhantomData, + } + } + + pub fn ptr(&self) -> u64 { + self.ptr + } + + pub fn len(&self) -> u64 { + self.len + } + + pub fn is_empty(&self) -> bool { + self.len == 0 + } +} diff --git a/transaction-dos/Cargo.toml b/transaction-dos/Cargo.toml index dae57acdd5fcdb..20b69ae8addc65 100644 --- a/transaction-dos/Cargo.toml +++ b/transaction-dos/Cargo.toml @@ -11,6 +11,9 @@ edition = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[features] +dev-context-only-utils = [] + [dependencies] bincode = { workspace = true } clap = { workspace = true } @@ -43,6 +46,6 @@ solana-version = { workspace = true } [dev-dependencies] solana-core = { workspace = true, features = ["dev-context-only-utils"] } solana-hash = { workspace = true } -solana-local-cluster = { workspace = true } +solana-local-cluster = { workspace = true, features = ["dev-context-only-utils"] } solana-poh-config = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/transaction-dos/src/main.rs b/transaction-dos/src/main.rs index d5d2655ad91107..e7405de187608d 100644 --- a/transaction-dos/src/main.rs +++ b/transaction-dos/src/main.rs @@ -43,7 +43,7 @@ pub fn airdrop_lamports( desired_balance: u64, ) -> bool { let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0); - info!("starting balance {}", starting_balance); + info!("starting balance {starting_balance}"); if starting_balance < desired_balance { let airdrop_amount = desired_balance - starting_balance; @@ -67,14 +67,16 @@ pub fn airdrop_lamports( } if tries >= 
5 { panic!( - "Error requesting airdrop: to addr: {faucet_addr:?} amount: {airdrop_amount} {result:?}" + "Error requesting airdrop: to addr: {faucet_addr:?} amount: \ + {airdrop_amount} {result:?}" ) } } } Err(err) => { panic!( - "Error requesting airdrop: {err:?} to addr: {faucet_addr:?} amount: {airdrop_amount}" + "Error requesting airdrop: {err:?} to addr: {faucet_addr:?} amount: \ + {airdrop_amount}" ); } }; @@ -82,7 +84,7 @@ pub fn airdrop_lamports( let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| { panic!("airdrop error {e}"); }); - info!("current balance {}...", current_balance); + info!("current balance {current_balance}..."); if current_balance - starting_balance != airdrop_amount { info!( @@ -159,7 +161,7 @@ fn run_transactions_dos( CommitmentConfig::confirmed(), )); - info!("Targeting {}", entrypoint_addr); + info!("Targeting {entrypoint_addr}"); let space = maybe_space.unwrap_or(1000); @@ -266,7 +268,7 @@ fn run_transactions_dos( .collect(); let mut last_balance = Instant::now(); - info!("Starting balance(s): {:?}", balances); + info!("Starting balance(s): {balances:?}"); let executor = TransactionExecutor::new(entrypoint_addr); @@ -297,10 +299,7 @@ fn run_transactions_dos( } last_balance = Instant::now(); if *balance < lamports * 2 { - info!( - "Balance {} is less than needed: {}, doing aidrop...", - balance, lamports - ); + info!("Balance {balance} is less than needed: {lamports}, doing aidrop..."); if !airdrop_lamports( &client, &faucet_addr, @@ -375,7 +374,7 @@ fn run_transactions_dos( accounts_created = true; } else { // Create dos transactions - info!("creating new batch of size: {}", batch_size); + info!("creating new batch of size: {batch_size}"); let chunk_size = batch_size / payer_keypairs.len(); for (i, keypair) in payer_keypairs.iter().enumerate() { let txs: Vec<_> = (0..chunk_size) @@ -412,8 +411,8 @@ fn run_transactions_dos( count += 1; if last_log.elapsed().as_secs() > 3 { info!( - "total_dos_messages_sent: {} 
tx_sent_count: {} loop_count: {} balance(s): {:?}", - total_dos_messages_sent, tx_sent_count, count, balances + "total_dos_messages_sent: {total_dos_messages_sent} tx_sent_count: \ + {tx_sent_count} loop_count: {count} balance(s): {balances:?}" ); last_log = Instant::now(); } @@ -474,14 +473,17 @@ fn main() { .takes_value(true) .multiple(true) .value_name("FILE") - .help("One or more keypairs to create accounts owned by the program and which the program will write to."), + .help( + "One or more keypairs to create accounts owned by the program and which the \ + program will write to.", + ), ) .arg( Arg::with_name("account_groups") - .long("account_groups") - .takes_value(true) - .value_name("NUM") - .help("Number of groups of accounts to split the accounts into") + .long("account_groups") + .takes_value(true) + .value_name("NUM") + .help("Number of groups of accounts to split the accounts into"), ) .arg( Arg::with_name("batch_size") @@ -516,7 +518,10 @@ fn main() { .long("batch-sleep-ms") .takes_value(true) .value_name("NUM") - .help("Sleep for this long the num outstanding transactions is greater than the batch size."), + .help( + "Sleep for this long the num outstanding transactions is greater than the \ + batch size.", + ), ) .arg( Arg::with_name("check_gossip") @@ -561,7 +566,7 @@ fn main() { Some( solana_net_utils::get_cluster_shred_version(&entrypoint_addr).unwrap_or_else( |err| { - eprintln!("Failed to get shred version: {}", err); + eprintln!("Failed to get shred version: {err}"); exit(1); }, ), @@ -615,7 +620,7 @@ fn main() { let account_keypair_refs: Vec<&Keypair> = account_keypairs.iter().collect(); let rpc_addr = if !skip_gossip { - info!("Finding cluster entry: {:?}", entrypoint_addr); + info!("Finding cluster entry: {entrypoint_addr:?}"); let (gossip_nodes, _validators) = discover( None, // keypair Some(&entrypoint_addr), @@ -635,7 +640,7 @@ fn main() { info!("done found {} nodes", gossip_nodes.len()); gossip_nodes[0].rpc().unwrap() } else { - 
info!("Using {:?} as the RPC address", entrypoint_addr); + info!("Using {entrypoint_addr:?} as the RPC address"); entrypoint_addr }; @@ -694,7 +699,7 @@ pub mod test { let blockhash = solana_hash::Hash::default(); let tx = Transaction::new(&signers, message, blockhash); let size = bincode::serialized_size(&tx).unwrap(); - info!("size:{}", size); + info!("size:{size}"); assert!(size < PACKET_DATA_SIZE as u64); } @@ -758,6 +763,6 @@ pub mod test { 100, ); start.stop(); - info!("{}", start); + info!("{start}"); } } diff --git a/transaction-status/src/parse_vote.rs b/transaction-status/src/parse_vote.rs index d82d0c27fba81c..c543435d0cc8ff 100644 --- a/transaction-status/src/parse_vote.rs +++ b/transaction-status/src/parse_vote.rs @@ -290,7 +290,7 @@ mod test { solana_sdk_ids::sysvar, solana_vote_interface::{ instruction as vote_instruction, - state::{TowerSync, Vote, VoteAuthorize, VoteInit, VoteStateUpdate, VoteStateVersions}, + state::{TowerSync, Vote, VoteAuthorize, VoteInit, VoteStateUpdate, VoteStateV3}, }, }; @@ -316,7 +316,7 @@ mod test { &vote_init, lamports, vote_instruction::CreateVoteAccountConfig { - space: VoteStateVersions::vote_state_size_of(true) as u64, + space: VoteStateV3::size_of() as u64, ..vote_instruction::CreateVoteAccountConfig::default() }, ); diff --git a/transaction-view/benches/bytes.rs b/transaction-view/benches/bytes.rs index d60cfdc8141530..dad920ba452af6 100644 --- a/transaction-view/benches/bytes.rs +++ b/transaction-view/benches/bytes.rs @@ -55,12 +55,8 @@ fn bench_u16_parsing(c: &mut Criterion) { fn decode_shortu16_len_iter(values_serialized_lengths_and_buffers: &[(u16, usize, Vec)]) { for (value, serialized_len, buffer) in values_serialized_lengths_and_buffers.iter() { let (read_value, bytes_read) = decode_shortu16_len(black_box(buffer)).unwrap(); - assert_eq!(read_value, *value as usize, "Value mismatch for: {}", value); - assert_eq!( - bytes_read, *serialized_len, - "Offset mismatch for: {}", - value - ); + assert_eq!(read_value, 
*value as usize, "Value mismatch for: {value}"); + assert_eq!(bytes_read, *serialized_len, "Offset mismatch for: {value}"); } } @@ -68,8 +64,8 @@ fn read_compressed_u16_iter(values_serialized_lengths_and_buffers: &[(u16, usize for (value, serialized_len, buffer) in values_serialized_lengths_and_buffers.iter() { let mut offset = 0; let read_value = read_compressed_u16(black_box(buffer), &mut offset).unwrap(); - assert_eq!(read_value, *value, "Value mismatch for: {}", value); - assert_eq!(offset, *serialized_len, "Offset mismatch for: {}", value); + assert_eq!(read_value, *value, "Value mismatch for: {value}"); + assert_eq!(offset, *serialized_len, "Offset mismatch for: {value}"); } } @@ -79,8 +75,8 @@ fn optimized_read_compressed_u16_iter( for (value, serialized_len, buffer) in values_serialized_lengths_and_buffers.iter() { let mut offset = 0; let read_value = optimized_read_compressed_u16(black_box(buffer), &mut offset).unwrap(); - assert_eq!(read_value, *value, "Value mismatch for: {}", value); - assert_eq!(offset, *serialized_len, "Offset mismatch for: {}", value); + assert_eq!(read_value, *value, "Value mismatch for: {value}"); + assert_eq!(offset, *serialized_len, "Offset mismatch for: {value}"); } } diff --git a/transaction-view/src/bytes.rs b/transaction-view/src/bytes.rs index 4317a363392746..86a0ea8eddbb0e 100644 --- a/transaction-view/src/bytes.rs +++ b/transaction-view/src/bytes.rs @@ -301,13 +301,12 @@ mod tests { let read_value = read_compressed_u16(&buffer, &mut offset); // Assert that the read value matches the original value - assert_eq!(read_value, Ok(value), "Value mismatch for: {}", value); + assert_eq!(read_value, Ok(value), "Value mismatch for: {value}"); // Assert that the offset matches the serialized length assert_eq!( offset, serialized_len as usize, - "Offset mismatch for: {}", - value + "Offset mismatch for: {value}" ); } @@ -354,13 +353,12 @@ mod tests { let read_value = optimized_read_compressed_u16(&buffer, &mut offset); // Assert that 
the read value matches the original value - assert_eq!(read_value, Ok(value), "Value mismatch for: {}", value); + assert_eq!(read_value, Ok(value), "Value mismatch for: {value}"); // Assert that the offset matches the serialized length assert_eq!( offset, serialized_len as usize, - "Offset mismatch for: {}", - value + "Offset mismatch for: {value}" ); } diff --git a/turbine/Cargo.toml b/turbine/Cargo.toml index ab830cf0758303..5496b2698c067f 100644 --- a/turbine/Cargo.toml +++ b/turbine/Cargo.toml @@ -1,14 +1,17 @@ [package] name = "solana-turbine" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/solana-turbine" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } +[features] +agave-unstable-api = [] + [dependencies] agave-feature-set = { workspace = true } agave-xdp = { workspace = true } @@ -31,11 +34,12 @@ solana-entry = { workspace = true } solana-gossip = { workspace = true } solana-hash = { workspace = true } solana-keypair = { workspace = true } -solana-ledger = { workspace = true } +solana-ledger = { workspace = true, features = ["agave-unstable-api"] } solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-native-token = { workspace = true } solana-net-utils = { workspace = true } +solana-nohash-hasher = { workspace = true } solana-perf = { workspace = true } solana-poh = { workspace = true } solana-pubkey = { workspace = true } @@ -62,12 +66,14 @@ caps = { workspace = true } assert_matches = { workspace = true } bencher = { workspace = true } bs58 = { workspace = true } +conditional-mod = { workspace = true } solana-genesis-config = { workspace = true } -solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } +solana-ledger = { workspace = true, features = ["agave-unstable-api","dev-context-only-utils"] } 
solana-logger = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-signature = { workspace = true, features = ["rand"] } solana-transaction = { workspace = true } +solana-turbine = { path = ".", features = ["agave-unstable-api"] } test-case = { workspace = true } [[bench]] diff --git a/turbine/benches/cluster_info.rs b/turbine/benches/cluster_info.rs index f5facdd8c12e19..ffd221aad0dc71 100644 --- a/turbine/benches/cluster_info.rs +++ b/turbine/benches/cluster_info.rs @@ -57,10 +57,10 @@ fn broadcast_shreds_bench(b: &mut Bencher) { let data_shreds = shredder.make_merkle_shreds_from_entries( &leader_keypair, &entries, - true, // is_last_in_slot - None, // chained_merkle_root - 0, // next_shred_index - 0, // next_code_index + true, // is_last_in_slot + Hash::default(), // chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ); @@ -85,7 +85,7 @@ fn broadcast_shreds_bench(b: &mut Bencher) { let shreds = shreds.clone(); broadcast_shreds( socket, - shreds, + &shreds, &cluster_nodes_cache, &last_datapoint, &mut TransmitShredsStats::default(), diff --git a/turbine/benches/cluster_nodes.rs b/turbine/benches/cluster_nodes.rs index a34fd227180bb7..8cd65c90bb5935 100644 --- a/turbine/benches/cluster_nodes.rs +++ b/turbine/benches/cluster_nodes.rs @@ -32,7 +32,6 @@ fn get_retransmit_peers_deterministic( slot_leader: &Pubkey, ) { let keypair = Keypair::new(); - let merkle_root = Some(Hash::default()); let reed_solomon_cache = ReedSolomonCache::default(); let mut stats = ProcessShredsStats::default(); let parent_slot = if slot > 0 { slot - 1 } else { 0 }; @@ -40,11 +39,11 @@ fn get_retransmit_peers_deterministic( let shreds = shredder.make_merkle_shreds_from_entries( &keypair, - &[], // entries - true, // is_last_in_slot - merkle_root, - 0, // next_shred_index - 0, // next_code_index + &[], // entries + true, // is_last_in_slot + Hash::default(), 
// chained_merkle_root + 0, // next_shred_index + 0, // next_code_index &reed_solomon_cache, &mut stats, ); diff --git a/turbine/src/addr_cache.rs b/turbine/src/addr_cache.rs index 15a4d2c6bbdd17..b1033f5aae3f0d 100644 --- a/turbine/src/addr_cache.rs +++ b/turbine/src/addr_cache.rs @@ -2,9 +2,8 @@ use { crate::retransmit_stage::RetransmitSlotStats, itertools::Itertools, solana_clock::Slot, - solana_ledger::{ - blockstore::MAX_DATA_SHREDS_PER_SLOT, - shred::{shred_code::MAX_CODE_SHREDS_PER_SLOT, ShredId, ShredType}, + solana_ledger::shred::{ + ShredId, ShredType, MAX_CODE_SHREDS_PER_SLOT, MAX_DATA_SHREDS_PER_SLOT, }, std::{ cmp::Reverse, diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index a7dd75ca3aa23c..48567c69830b17 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -10,7 +10,7 @@ use { }, crate::{ cluster_nodes::{self, ClusterNodes, ClusterNodesCache}, - xdp::{XdpSender, XdpShredPayload}, + xdp::XdpSender, }, bytes::Bytes, crossbeam_channel::{unbounded, Receiver, RecvError, RecvTimeoutError, Sender}, @@ -21,10 +21,7 @@ use { contact_info::Protocol, }, solana_keypair::Keypair, - solana_ledger::{ - blockstore::Blockstore, - shred::{self, Shred}, - }, + solana_ledger::{blockstore::Blockstore, shred::Shred}, solana_measure::measure::Measure, solana_metrics::{inc_new_counter_error, inc_new_counter_info}, solana_poh::poh_recorder::WorkingBankEntry, @@ -320,7 +317,34 @@ impl BroadcastStage { .unwrap() }; let mut thread_hdls = vec![thread_hdl]; - thread_hdls.extend(socks.into_iter().map(|sock| { + let num_broadcast_sockets_per_interface = socks.len() / cluster_info.bind_ip_addrs().len(); + let num_interfaces: usize = cluster_info.bind_ip_addrs().len(); + + // Partition by interface + // With 2 interfaces and the default of 4 sockets per interface, `sockets_by_interface` is: + // sockets_by_interface = [[s0, s1, s2, s3], [s4, s5, s6, s7]] + let mut it = socks.into_iter(); + let sockets_by_interface: Vec> = 
(0..num_interfaces) + .map(|_| { + it.by_ref() + .take(num_broadcast_sockets_per_interface) + .collect() + }) + .collect(); + + let mut iters: Vec<_> = sockets_by_interface + .into_iter() + .map(|sockets| sockets.into_iter()) + .collect(); + + // Spawn `num_broadcast_sockets_per_interface` threads + // Each thread gets a socket from each interface (i.e. 2 sockets per thread if multihomed w/ 2 interfaces) + thread_hdls.extend((0..num_broadcast_sockets_per_interface).map(|_| { + let mut group = Vec::with_capacity(num_interfaces); + for it in &mut iters { + group.push(it.next().expect("aligned lengths")); + } + let socket_receiver = socket_receiver.clone(); let mut bs_transmit = broadcast_stage_run.clone(); let cluster_info = cluster_info.clone(); @@ -328,19 +352,22 @@ impl BroadcastStage { let quic_endpoint_sender = quic_endpoint_sender.clone(); let xdp_sender = xdp_sender.clone(); let run_transmit = move || loop { - let sock = match xdp_sender.as_ref() { - Some(xdp_sender) => BroadcastSocket::Xdp(xdp_sender), - None => BroadcastSocket::Udp(&sock), + let sock_variant = match xdp_sender.as_ref() { + Some(xdp) => BroadcastSocket::Xdp(xdp), + None => { + let active_index = cluster_info.bind_ip_addrs().active_index(); + let active_socket = &group[active_index]; + BroadcastSocket::Udp(active_socket) + } }; let res = bs_transmit.transmit( &socket_receiver, &cluster_info, - sock, + sock_variant, &bank_forks, &quic_endpoint_sender, ); - let res = Self::handle_error(res, "solana-broadcaster-transmit"); - if let Some(res) = res { + if let Some(res) = Self::handle_error(res, "solana-broadcaster-transmit") { return res; } }; @@ -454,8 +481,8 @@ pub enum BroadcastSocket<'a> { /// Broadcasts shreds from the leader (i.e. this node) to the root of the /// turbine retransmit tree for each shred. 
pub fn broadcast_shreds( - s: BroadcastSocket, - shreds: Arc>, + socket: BroadcastSocket, + shreds: &[Shred], cluster_nodes_cache: &ClusterNodesCache, last_datapoint_submit: &AtomicInterval, transmit_stats: &mut TransmitShredsStats, @@ -471,28 +498,15 @@ pub fn broadcast_shreds( let bank_forks = bank_forks.read().unwrap(); (bank_forks.root_bank(), bank_forks.working_bank()) }; - // Implementation note: - // We are gathering the indexes of the shreds in the `shreds` vector rather than the shred - // payloads themselves. This is because, in the XDP case, the shred payloads will be sent to - // the XDP thread(s) via a channel, and the lifetime of the shred payloads must extend to that - // of the XDP thread(s). - // - // Because the `shreds` vector is behind a shared (`Arc`) reference, we must pass that reference - // along to the XDP thread(s) via the Xdp channel message payload. This allows us to extend the - // lifetime of the shred payloads to the XDP thread(s) without cloning each payload. - // - // When `shred::Payload` is refactored to use `Bytes`, this can be adjusted to simply pass the payload - // `Bytes` directly to the XDP thread(s). 
let (packets, quic_packets): (Vec<_>, Vec<_>) = shreds .iter() - .enumerate() - .group_by(|(_, shred)| shred.slot()) + .group_by(|shred| shred.slot()) .into_iter() .flat_map(|(slot, shreds)| { let cluster_nodes = cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info); update_peer_stats(&cluster_nodes, last_datapoint_submit); - shreds.filter_map(move |(idx, shred)| { + shreds.filter_map(move |shred| { let key = shred.id(); let protocol = cluster_nodes::get_broadcast_protocol(&key); cluster_nodes @@ -503,7 +517,7 @@ pub fn broadcast_shreds( (match protocol { Protocol::QUIC => Either::Right, Protocol::UDP => Either::Left, - })((idx, addr)) + })((shred.payload(), addr)) }) }) }) @@ -511,15 +525,10 @@ pub fn broadcast_shreds( shred_select.stop(); transmit_stats.shred_select += shred_select.as_us(); let num_udp_packets = packets.len(); - match s { + match socket { BroadcastSocket::Udp(s) => { let mut send_mmsg_time = Measure::start("send_mmsg"); - match batch_send( - s, - packets - .iter() - .map(|(idx, addr)| (shreds[*idx].payload(), *addr)), - ) { + match batch_send(s, packets) { Ok(()) => (), Err(SendPktsError::IoError(ioerr, num_failed)) => { transmit_stats.dropped_packets_udp += num_failed; @@ -531,15 +540,8 @@ pub fn broadcast_shreds( } BroadcastSocket::Xdp(s) => { let mut send_xdp_time = Measure::start("send_xdp"); - for (idx, addr) in packets.into_iter() { - if let Err(e) = s.try_send( - idx, - vec![addr], - XdpShredPayload::Shared { - ptr: Arc::clone(&shreds), - index: idx, - }, - ) { + for (idx, (payload, addr)) in packets.into_iter().enumerate() { + if let Err(e) = s.try_send(idx, addr, payload.clone()) { log::warn!("xdp channel full: {e:?}"); transmit_stats.dropped_packets_xdp += 1; result = Err(Error::XdpChannelFull); @@ -552,10 +554,8 @@ pub fn broadcast_shreds( let mut quic_send_time = Measure::start("send shreds via quic"); transmit_stats.total_packets += num_udp_packets + quic_packets.len(); - for (idx, addr) in quic_packets { - let shred 
= shreds[idx].payload(); - let shred = Bytes::from(shred::Payload::unwrap_or_clone(shred.clone())); - if let Err(err) = quic_endpoint_sender.blocking_send((addr, shred)) { + for (payload, addr) in quic_packets { + if let Err(err) = quic_endpoint_sender.blocking_send((addr, payload.bytes.clone())) { transmit_stats.dropped_packets_quic += 1; result = Err(Error::from(err)); } @@ -624,7 +624,7 @@ pub mod test { &entries, true, // is_last_in_slot // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), + Hash::new_from_array(rand::thread_rng().gen()), 0, // next_shred_index, 0, // next_code_index &ReedSolomonCache::default(), diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs index 664123e203c045..6d9592502eed56 100644 --- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs @@ -185,7 +185,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), - Some(self.chained_merkle_root), + self.chained_merkle_root, self.next_shred_index, self.next_code_index, &self.reed_solomon_cache, @@ -204,7 +204,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &[original_last_entry], true, - Some(self.chained_merkle_root), + self.chained_merkle_root, self.next_shred_index, self.next_code_index, &self.reed_solomon_cache, @@ -217,7 +217,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &duplicate_extra_last_entries, true, - Some(self.chained_merkle_root), + self.chained_merkle_root, self.next_shred_index, self.next_code_index, &self.reed_solomon_cache, diff --git a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs index e1389dd4bb7cf0..6c70857701e263 100644 --- a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ 
b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -83,7 +83,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height(), - Some(chained_merkle_root), + chained_merkle_root, next_shred_index, self.next_code_index, &self.reed_solomon_cache, @@ -104,7 +104,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { keypair, &fake_entries, last_tick_height == bank.max_tick_height(), - Some(chained_merkle_root), + chained_merkle_root, next_shred_index, self.next_code_index, &self.reed_solomon_cache, diff --git a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index edab98f6f29135..37e94fb3ad21dd 100644 --- a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -105,7 +105,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), - Some(self.chained_merkle_root), + self.chained_merkle_root, self.next_shred_index, self.next_code_index, &self.reed_solomon_cache, @@ -124,7 +124,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &[good_last_entry], true, - Some(self.chained_merkle_root), + self.chained_merkle_root, self.next_shred_index, self.next_code_index, &self.reed_solomon_cache, @@ -137,7 +137,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &[bad_last_entry], false, - Some(self.chained_merkle_root), + self.chained_merkle_root, self.next_shred_index, self.next_code_index, &self.reed_solomon_cache, @@ -186,7 +186,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { let (shreds, _) = receiver.recv()?; broadcast_shreds( sock, - shreds, + &shreds, &self.cluster_nodes_cache, &AtomicInterval::default(), &mut TransmitShredsStats::default(), diff --git 
a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index b6182a8eab30ab..2d11108a1682db 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -9,9 +9,9 @@ use { solana_entry::entry::Entry, solana_hash::Hash, solana_keypair::Keypair, - solana_ledger::{ - blockstore, - shred::{shred_code, ProcessShredsStats, ReedSolomonCache, Shred, ShredType, Shredder}, + solana_ledger::shred::{ + ProcessShredsStats, ReedSolomonCache, Shred, ShredType, Shredder, MAX_CODE_SHREDS_PER_SLOT, + MAX_DATA_SHREDS_PER_SLOT, }, solana_time_utils::AtomicInterval, std::{borrow::Cow, sync::RwLock}, @@ -92,7 +92,7 @@ impl StandardBroadcastRun { keypair, &[], // entries true, // is_last_in_slot, - Some(self.chained_merkle_root), + self.chained_merkle_root, self.next_shred_index, self.next_code_index, &self.reed_solomon_cache, @@ -126,7 +126,7 @@ impl StandardBroadcastRun { keypair, entries, is_slot_end, - Some(self.chained_merkle_root), + self.chained_merkle_root, self.next_shred_index, self.next_code_index, &self.reed_solomon_cache, @@ -274,8 +274,8 @@ impl StandardBroadcastRun { reference_tick as u8, is_last_in_slot, process_stats, - blockstore::MAX_DATA_SHREDS_PER_SLOT as u32, - shred_code::MAX_CODE_SHREDS_PER_SLOT as u32, + MAX_DATA_SHREDS_PER_SLOT as u32, + MAX_CODE_SHREDS_PER_SLOT as u32, ) .unwrap(); // Insert the first data shred synchronously so that blockstore stores @@ -392,7 +392,7 @@ impl StandardBroadcastRun { broadcast_shreds( sock, - shreds, + &shreds, &self.cluster_nodes_cache, &self.last_datapoint_submit, &mut transmit_stats, diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs index 4a58da78339b38..787c17da7a0a1d 100644 --- a/turbine/src/cluster_nodes.rs +++ b/turbine/src/cluster_nodes.rs @@ -47,9 +47,6 @@ thread_local! 
{ const DATA_PLANE_FANOUT: usize = 200; pub(crate) const MAX_NUM_TURBINE_HOPS: usize = 4; -// Limit number of nodes per IP address. -const MAX_NUM_NODES_PER_IP_ADDRESS: usize = 10; - #[derive(Debug, Error)] pub enum Error { #[error("Loopback from slot leader: {leader}, shred: {shred:?}")] @@ -398,10 +395,13 @@ fn cmp_nodes_stake(a: &Node, b: &Node) -> Ordering { }) } -// Dedups socket addresses so that if there are 2 nodes in the cluster with the -// same TVU socket-addr, we only send shreds to one of them. -// Additionally limits number of nodes at the same IP address to -// MAX_NUM_NODES_PER_IP_ADDRESS. +/// If set > 1 it allows the nodes to run behind a NAT. +/// This usecase is currently not supported. +const MAX_NUM_NODES_PER_IP_ADDRESS: usize = 1; + +/// Dedups socket addresses so that if there are 2 nodes in the cluster with the +/// same TVU socket-addr, we only send shreds to one of them. +/// Additionally limits number of nodes at the same IP address to 1 fn dedup_tvu_addrs(nodes: &mut Vec) { const TVU_PROTOCOLS: [Protocol; 2] = [Protocol::UDP, Protocol::QUIC]; let capacity = nodes.len().saturating_mul(2); @@ -916,7 +916,7 @@ mod tests { 5, // root 34, 52, 8, // 1st layer // 2nd layar - 44, 18, 2, // 1st neigborhood + 44, 18, 2, // 1st neighborhood 42, 47, 46, // 2nd 11, 26, 28, // 3rd // 3rd layer diff --git a/turbine/src/lib.rs b/turbine/src/lib.rs index fb7cf2c4d96558..50c68d15e98dd3 100644 --- a/turbine/src/lib.rs +++ b/turbine/src/lib.rs @@ -1,16 +1,31 @@ #![allow(clippy::arithmetic_side_effects)] +#[cfg(feature = "agave-unstable-api")] mod addr_cache; + +#[cfg(feature = "agave-unstable-api")] pub mod broadcast_stage; + +#[cfg(feature = "agave-unstable-api")] pub mod cluster_nodes; + +#[cfg(feature = "agave-unstable-api")] pub mod quic_endpoint; + +#[cfg(feature = "agave-unstable-api")] pub mod retransmit_stage; + +#[cfg(feature = "agave-unstable-api")] pub mod sigverify_shreds; + +#[cfg(feature = "agave-unstable-api")] pub mod xdp; +#[cfg(feature = 
"agave-unstable-api")] #[macro_use] extern crate log; +#[cfg(feature = "agave-unstable-api")] #[macro_use] extern crate solana_metrics; diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs index 2def9be8dbec18..0f56b0cb6d4f80 100644 --- a/turbine/src/retransmit_stage.rs +++ b/turbine/src/retransmit_stage.rs @@ -4,7 +4,7 @@ use { crate::{ addr_cache::AddrCache, cluster_nodes::{self, ClusterNodes, ClusterNodesCache, Error, MAX_NUM_TURBINE_HOPS}, - xdp::{XdpSender, XdpShredPayload}, + xdp::XdpSender, }, bytes::Bytes, crossbeam_channel::{Receiver, RecvError, TryRecvError}, @@ -163,7 +163,7 @@ impl RetransmitStats { i64 ), ); - // slot_stats are submited at a different cadence. + // slot_stats are submitted at a different cadence. let old = std::mem::replace(self, Self::new(Instant::now())); self.slot_stats = old.slot_stats; } @@ -217,6 +217,58 @@ impl ShredDeduper { enum RetransmitSocket<'a> { Socket(&'a UdpSocket), Xdp(&'a XdpSender), + Multihomed { + sockets: &'a [UdpSocket], + interface_offset: usize, + sockets_per_interface: usize, + thread_index: usize, + }, +} + +impl<'a> RetransmitSocket<'a> { + pub fn new( + thread_index: usize, + retransmit_sockets: &'a [UdpSocket], + xdp_sender: Option<&'a XdpSender>, + cluster_info: &'a ClusterInfo, + ) -> Self { + if let Some(xdp_sender) = xdp_sender { + RetransmitSocket::Xdp(xdp_sender) + } else if cluster_info.bind_ip_addrs().multihoming_enabled() { + let sockets_per_interface = + retransmit_sockets.len() / cluster_info.bind_ip_addrs().len(); + let active_index = cluster_info.bind_ip_addrs().active_index(); + let interface_offset = sockets_per_interface.saturating_mul(active_index); + + RetransmitSocket::Multihomed { + sockets: retransmit_sockets, + interface_offset, + sockets_per_interface, + thread_index, + } + } else { + let socket: &UdpSocket = &retransmit_sockets[thread_index % retransmit_sockets.len()]; + RetransmitSocket::Socket(socket) + } + } + + pub fn get_socket(&self) -> &'a UdpSocket 
{ + match self { + RetransmitSocket::Socket(socket) => socket, + RetransmitSocket::Multihomed { + sockets, + interface_offset, + sockets_per_interface, + thread_index, + } => { + let socket_index = interface_offset + (thread_index % sockets_per_interface); + &sockets[socket_index] + } + RetransmitSocket::Xdp(_) => { + unreachable!("get_socket() should not be called for XDP variants") + } + } + } } /// The number of shreds to pull from the retransmit_receiver at a time. @@ -342,12 +394,8 @@ fn retransmit( ) }; - let retransmit_socket = |index| { - let socket = xdp_sender.map(RetransmitSocket::Xdp).unwrap_or_else(|| { - RetransmitSocket::Socket(&retransmit_sockets[index % retransmit_sockets.len()]) - }); - socket - }; + let retransmit_socket = + |index: usize| RetransmitSocket::new(index, retransmit_sockets, xdp_sender, cluster_info); let slot_stats = if num_shreds < PAR_ITER_MIN_NUM_SHREDS { stats.num_small_batches += 1; @@ -420,7 +468,7 @@ fn retransmit_shred( let num_addrs = addrs.len(); let num_nodes = match cluster_nodes::get_broadcast_protocol(&key) { Protocol::QUIC => { - let shred = Bytes::from(shred::Payload::unwrap_or_clone(shred)); + let shred = shred.bytes; addrs .iter() .filter_map(|&addr| quic_endpoint_sender.try_send((addr, shred.clone())).ok()) @@ -430,11 +478,7 @@ fn retransmit_shred( RetransmitSocket::Xdp(sender) => { let mut sent = num_addrs; if num_addrs > 0 { - if let Err(e) = sender.try_send( - key.index() as usize, - addrs.to_vec(), - XdpShredPayload::Owned(shred), - ) { + if let Err(e) = sender.try_send(key.index() as usize, addrs.to_vec(), shred) { log::warn!("xdp channel full: {e:?}"); stats .num_shreds_dropped_xdp_full @@ -444,16 +488,19 @@ fn retransmit_shred( } sent } - RetransmitSocket::Socket(socket) => match multi_target_send(socket, shred, &addrs) { - Ok(()) => num_addrs, - Err(SendPktsError::IoError(ioerr, num_failed)) => { - error!( - "retransmit_to multi_target_send error: {ioerr:?}, \ - {num_failed}/{num_addrs} packets failed" - 
); - num_addrs - num_failed + RetransmitSocket::Socket(_) | RetransmitSocket::Multihomed { .. } => { + let socket = socket.get_socket(); + match multi_target_send(socket, shred, &addrs) { + Ok(()) => num_addrs, + Err(SendPktsError::IoError(ioerr, num_failed)) => { + error!( + "retransmit_to multi_target_send error: {ioerr:?}, \ + {num_failed}/{num_addrs} packets failed" + ); + num_addrs - num_failed + } } - }, + } }, }; retransmit_time.stop(); @@ -853,7 +900,7 @@ mod tests { bs58::decode(KEYPAIR) .into_vec() .as_deref() - .map(Keypair::from_bytes) + .map(Keypair::try_from) .unwrap() .unwrap() } @@ -870,7 +917,7 @@ mod tests { &entries, true, // chained_merkle_root - Some(Hash::new_from_array(rand::thread_rng().gen())), + Hash::new_from_array(rand::thread_rng().gen()), 0, code_index, &rsc, @@ -906,7 +953,7 @@ mod tests { // first shred passed through assert!( !shred_deduper.dedup(shred_dup.id(), shred_dup.payload(), MAX_DUPLICATE_COUNT), - "First time seeing shred X with differnt parent slot (3 instead of 4) => Not dup \ + "First time seeing shred X with different parent slot (3 instead of 4) => Not dup \ because common header is unique & shred ID only seen once" ); // then blocked diff --git a/turbine/src/sigverify_shreds.rs b/turbine/src/sigverify_shreds.rs index 38d2fc6ffb5452..324c2c425091d7 100644 --- a/turbine/src/sigverify_shreds.rs +++ b/turbine/src/sigverify_shreds.rs @@ -17,7 +17,7 @@ use { layout::{get_shred, resign_packet}, wire::is_retransmitter_signed_variant, }, - sigverify_shreds::{verify_shreds_gpu, LruCache}, + sigverify_shreds::{verify_shreds_gpu, LruCache, SlotPubkeys}, }, solana_perf::{ self, @@ -30,7 +30,6 @@ use { solana_signer::Signer, solana_streamer::{evicting_sender::EvictingSender, streamer::ChannelSend}, std::{ - collections::HashMap, num::NonZeroUsize, sync::{ atomic::{AtomicUsize, Ordering}, @@ -254,7 +253,7 @@ fn run_shred_sigverify( } else { // Share the payload between the retransmit-stage and the // window-service. 
- Either::Left(shred::Payload::from(Arc::new(shred))) + Either::Left(shred::Payload::from(shred)) } }); // Repaired shreds are not retransmitted. @@ -400,9 +399,8 @@ fn verify_packets( packets: &mut [PacketBatch], cache: &RwLock, ) { - let leader_slots: HashMap = + let leader_slots: SlotPubkeys = get_slot_leaders(self_pubkey, packets, leader_schedule_cache, working_bank) - .into_iter() .filter_map(|(slot, pubkey)| Some((slot, pubkey?))) .chain(std::iter::once((Slot::MAX, Pubkey::default()))) .collect(); @@ -410,39 +408,32 @@ fn verify_packets( solana_perf::sigverify::mark_disabled(packets, &out); } -// Returns pubkey of leaders for shred slots refrenced in the packets. +// Returns pubkey of leaders for shred slots referenced in the packets. // Marks packets as discard if: // - fails to deserialize the shred slot. // - slot leader is unknown. // - slot leader is the node itself (circular transmission). -fn get_slot_leaders( - self_pubkey: &Pubkey, - batches: &mut [PacketBatch], - leader_schedule_cache: &LeaderScheduleCache, - bank: &Bank, -) -> HashMap> { - let mut leaders = HashMap::>::new(); +fn get_slot_leaders<'a>( + self_pubkey: &'a Pubkey, + batches: &'a mut [PacketBatch], + leader_schedule_cache: &'a LeaderScheduleCache, + bank: &'a Bank, +) -> impl Iterator)> + 'a { batches .iter_mut() .flat_map(|batch| batch.iter_mut()) .filter(|packet| !packet.meta().discard()) - .filter(|packet| { + .filter_map(move |mut packet| { let shred = shred::layout::get_shred(packet.as_ref()); - let Some(slot) = shred.and_then(shred::layout::get_slot) else { - return true; - }; - leaders - .entry(slot) - .or_insert_with(|| { - // Discard the shred if the slot leader is the node itself. 
- leader_schedule_cache - .slot_leader_at(slot, Some(bank)) - .filter(|leader| leader != self_pubkey) - }) - .is_none() + let slot = shred.and_then(shred::layout::get_slot)?; + let leader = leader_schedule_cache + .slot_leader_at(slot, Some(bank)) + .filter(|leader| leader != self_pubkey); + if leader.is_none() { + packet.meta_mut().set_discard(true); + } + Some((slot, leader)) }) - .for_each(|mut packet| packet.meta_mut().set_discard(true)); - leaders } fn count_discards(packets: &[PacketBatch]) -> usize { @@ -608,7 +599,7 @@ mod tests { &leader_keypair, &entries, true, - Some(Hash::new_unique()), + Hash::new_unique(), 0, 0, &ReedSolomonCache::default(), @@ -618,7 +609,7 @@ mod tests { &wrong_keypair, &entries, true, - Some(Hash::new_unique()), + Hash::new_unique(), 0, 0, &ReedSolomonCache::default(), @@ -671,8 +662,7 @@ mod tests { let bank_forks = bank_forks.read().unwrap(); (bank_forks.working_bank(), bank_forks.root_bank()) }; - - let chained_merkle_root = Some(Hash::new_from_array(rng.gen())); + let chained_merkle_root = Hash::new_from_array(rng.gen()); let shredder = Shredder::new(root_bank.slot(), root_bank.parent_slot(), 0, 0).unwrap(); let entries = vec![Entry::new(&Hash::default(), 0, vec![])]; diff --git a/turbine/src/xdp.rs b/turbine/src/xdp.rs index 496e007558ed3a..310961e5a0898c 100644 --- a/turbine/src/xdp.rs +++ b/turbine/src/xdp.rs @@ -8,12 +8,12 @@ use { tx_loop::tx_loop, }, crossbeam_channel::TryRecvError, - std::{thread::Builder, time::Duration}, + std::{sync::Arc, thread::Builder, time::Duration}, }; use { crossbeam_channel::{Sender, TrySendError}, solana_ledger::shred, - std::{error::Error, net::SocketAddr, sync::Arc, thread}, + std::{error::Error, net::SocketAddr, thread}, }; #[derive(Clone, Debug)] @@ -53,41 +53,38 @@ impl XdpConfig { } } -/// The shred payload variants of the Xdp channel. -/// -/// This is currently meant to capture the constraints of both the retransmit -/// and broadcast stages. 
-pub(crate) enum XdpShredPayload { - /// The shreds, and thus their payloads, are owned by the caller. - /// - /// For example, retransmit has its own [`Vec`] of [`shred::Shred`], and can simply - /// pass along the payloads to the XDP thread(s) via the Xdp channel. - Owned(shred::Payload), - /// The shreds, and thus their payloads, are shared between disparate components in the validator. - /// - /// For example, broadcast deals with an `Arc>` due to those shreds being - /// shared with the blockstore (see [`StandardBroadcastRun::process_receive_results`](crate::broadcast_stage::standard_broadcast_run::StandardBroadcastRun::process_receive_results)). - /// To avoid cloning the payloads, we pass along the `Arc` reference and the index of the shred - /// in the `Vec` to the XDP thread(s). - Shared { - ptr: Arc>, - index: usize, - }, +#[derive(Clone)] +pub struct XdpSender { + senders: Vec>, +} + +pub enum XdpAddrs { + Single(SocketAddr), + Multi(Vec), } -impl AsRef<[u8]> for XdpShredPayload { +impl From for XdpAddrs { #[inline] - fn as_ref(&self) -> &[u8] { - match self { - XdpShredPayload::Owned(payload) => payload.as_ref(), - XdpShredPayload::Shared { ptr, index } => ptr[*index].payload().as_ref(), - } + fn from(addr: SocketAddr) -> Self { + XdpAddrs::Single(addr) } } -#[derive(Clone)] -pub struct XdpSender { - senders: Vec, XdpShredPayload)>>, +impl From> for XdpAddrs { + #[inline] + fn from(addrs: Vec) -> Self { + XdpAddrs::Multi(addrs) + } +} + +impl AsRef<[SocketAddr]> for XdpAddrs { + #[inline] + fn as_ref(&self) -> &[SocketAddr] { + match self { + XdpAddrs::Single(addr) => std::slice::from_ref(addr), + XdpAddrs::Multi(addrs) => addrs, + } + } } impl XdpSender { @@ -95,10 +92,10 @@ impl XdpSender { pub(crate) fn try_send( &self, sender_index: usize, - addr: Vec, - payload: XdpShredPayload, - ) -> Result<(), TrySendError<(Vec, XdpShredPayload)>> { - self.senders[sender_index % self.senders.len()].try_send((addr, payload)) + addr: impl Into, + payload: 
shred::Payload, + ) -> Result<(), TrySendError<(XdpAddrs, shred::Payload)>> { + self.senders[sender_index % self.senders.len()].try_send((addr.into(), payload)) } } @@ -118,6 +115,7 @@ impl XdpRetransmitter { CapSet, Capability::{CAP_BPF, CAP_NET_ADMIN, CAP_NET_RAW}, }; + const DROP_CHANNEL_CAP: usize = 1_000_000; // switch to higher caps while we setup XDP. We assume that an error in // this function is irrecoverable so we don't try to drop on errors. @@ -151,7 +149,7 @@ impl XdpRetransmitter { let mut threads = vec![]; - let (drop_sender, drop_receiver) = crossbeam_channel::bounded(1_000_000); + let (drop_sender, drop_receiver) = crossbeam_channel::bounded(DROP_CHANNEL_CAP); threads.push( Builder::new() .name("solRetransmDrop".to_owned()) diff --git a/udp-client/src/lib.rs b/udp-client/src/lib.rs index 230b4ad78a261a..c0bb2b156057cc 100644 --- a/udp-client/src/lib.rs +++ b/udp-client/src/lib.rs @@ -16,11 +16,9 @@ use { connection_cache_stats::ConnectionCacheStats, }, solana_keypair::Keypair, - solana_net_utils::sockets::{ - bind_with_any_port_with_config, SocketConfiguration as SocketConfig, - }, + solana_net_utils::sockets::{self, SocketConfiguration}, std::{ - net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket}, + net::{SocketAddr, UdpSocket}, sync::Arc, }, }; @@ -65,13 +63,16 @@ pub struct UdpConfig { impl NewConnectionConfig for UdpConfig { fn new() -> Result { - let socket = bind_with_any_port_with_config( - IpAddr::V4(Ipv4Addr::UNSPECIFIED), - SocketConfig::default(), + // This will bind to random ports, but VALIDATOR_PORT_RANGE is outside + // of the range for CI tests when this is running in CI + let socket = sockets::bind_in_range_with_config( + std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST), + solana_net_utils::VALIDATOR_PORT_RANGE, + SocketConfiguration::default(), ) .map_err(Into::::into)?; Ok(Self { - udp_socket: Arc::new(socket), + udp_socket: Arc::new(socket.1), }) } } diff --git a/udp-client/src/nonblocking/udp_client.rs 
b/udp-client/src/nonblocking/udp_client.rs index 166cd79a50be5f..17ecf1b7f41f5d 100644 --- a/udp-client/src/nonblocking/udp_client.rs +++ b/udp-client/src/nonblocking/udp_client.rs @@ -47,7 +47,8 @@ mod tests { use { super::*, solana_net_utils::sockets::{ - bind_to_async, bind_with_any_port_with_config, SocketConfiguration as SocketConfig, + bind_to_async, bind_to_with_config, unique_port_range_for_tests, + SocketConfiguration as SocketConfig, }, solana_packet::{Packet, PACKET_DATA_SIZE}, solana_streamer::nonblocking::recvmmsg::recv_mmsg, @@ -73,15 +74,20 @@ mod tests { #[tokio::test] async fn test_send_from_addr() { - let addr_str = "0.0.0.0:50100"; - let addr = addr_str.parse().unwrap(); - let socket = bind_with_any_port_with_config( + let mut port_range = unique_port_range_for_tests(4); + let socket = bind_to_with_config( IpAddr::V4(Ipv4Addr::UNSPECIFIED), + port_range.next().unwrap(), SocketConfig::default(), ) .unwrap(); - let connection = UdpClientConnection::new_from_addr(socket, addr); - let reader = bind_to_async(addr.ip(), addr.port()).await.expect("bind"); + + let reader_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + let reader_port = port_range.next().unwrap(); + let connection = + UdpClientConnection::new_from_addr(socket, SocketAddr::new(reader_ip, reader_port)); + + let reader = bind_to_async(reader_ip, reader_port).await.expect("bind"); check_send_one(&connection, &reader).await; check_send_batch(&connection, &reader).await; } diff --git a/udp-client/src/udp_client.rs b/udp-client/src/udp_client.rs index 39f6864d5c153d..7bc69fd7b788c9 100644 --- a/udp-client/src/udp_client.rs +++ b/udp-client/src/udp_client.rs @@ -30,7 +30,7 @@ impl ClientConnection for UdpClientConnection { &self.addr } - fn send_data_async(&self, data: Vec) -> TransportResult<()> { + fn send_data_async(&self, data: Arc>) -> TransportResult<()> { self.socket.send_to(data.as_ref(), self.addr)?; Ok(()) } diff --git a/unified-scheduler-pool/Cargo.toml b/unified-scheduler-pool/Cargo.toml 
index 58e3b692e09f9a..4996870ea4c90e 100644 --- a/unified-scheduler-pool/Cargo.toml +++ b/unified-scheduler-pool/Cargo.toml @@ -33,7 +33,7 @@ solana-pubkey = { workspace = true } solana-runtime = { workspace = true } solana-runtime-transaction = { workspace = true } solana-svm = { workspace = true } -solana-timings = { workspace = true } +solana-svm-timings = { workspace = true } solana-transaction = { workspace = true } solana-transaction-error = { workspace = true } solana-unified-scheduler-logic = { workspace = true } diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index 506a2526b9849c..4810cfa2cf66cd 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -45,7 +45,7 @@ use { }, solana_runtime_transaction::runtime_transaction::RuntimeTransaction, solana_svm::transaction_processing_result::ProcessedTransaction, - solana_timings::ExecuteTimings, + solana_svm_timings::ExecuteTimings, solana_transaction::sanitized::SanitizedTransaction, solana_transaction_error::{TransactionError, TransactionResult as Result}, solana_unified_scheduler_logic::{ @@ -201,7 +201,7 @@ impl, TH: TaskHandler> BlockProductionSchedulerInner S::Inner { let id = { let Self::Pooled(inner) = &self else { - panic!("cannot take: {:?}", self) + panic!("cannot take: {self:?}") }; inner.id() }; @@ -609,8 +609,9 @@ where }; info!( - "Scheduler pool cleaner: dropped {} idle inners, {} trashed inners, triggered {} timeout listeners", - idle_inner_count, trashed_inner_count, triggered_timeout_listener_count, + "Scheduler pool cleaner: dropped {idle_inner_count} idle inners, \ + {trashed_inner_count} trashed inners, triggered \ + {triggered_timeout_listener_count} timeout listeners", ); sleepless_testing::at(CheckPoint::IdleSchedulerCleaned(idle_inner_count)); sleepless_testing::at(CheckPoint::TrashedSchedulerCleaned(trashed_inner_count)); @@ -2328,10 +2329,10 @@ impl, TH: TaskHandler> ThreadManager { let current_thread = 
thread::current(); error!("handler thread is panicking: {:?}", current_thread); if sender.send(Err(HandlerPanicked)).is_ok() { - info!("notified a panic from {:?}", current_thread); + info!("notified a panic from {current_thread:?}"); } else { // It seems that the scheduler thread has been aborted already... - warn!("failed to notify a panic from {:?}", current_thread); + warn!("failed to notify a panic from {current_thread:?}"); } } let mut task = ExecutedTask::new_boxed(task); @@ -2364,7 +2365,7 @@ impl, TH: TaskHandler> ThreadManager { .map({ |thx| { thread::Builder::new() - .name(format!("solScHandle{mode_char}{:02}", thx)) + .name(format!("solScHandle{mode_char}{thx:02}")) .spawn_tracked(handler_main_loop()) .unwrap() } @@ -2395,13 +2396,13 @@ impl, TH: TaskHandler> ThreadManager { (_, Some(s)) => s, (None, None) => "", }; - panic!("{} (From: {:?})", panic_message, thread); + panic!("{panic_message} (From: {thread:?})"); }) } if let Some(scheduler_thread) = self.scheduler_thread.take() { for thread in self.handler_threads.drain(..) 
{ - debug!("joining...: {:?}", thread); + debug!("joining...: {thread:?}"); () = join_with_panic_message(thread).unwrap(); } () = join_with_panic_message(scheduler_thread).unwrap(); @@ -2739,8 +2740,8 @@ mod tests { installed_scheduler_pool::{BankWithScheduler, SchedulingContext}, prioritization_fee_cache::PrioritizationFeeCache, }, + solana_svm_timings::ExecuteTimingType, solana_system_transaction as system_transaction, - solana_timings::ExecuteTimingType, solana_transaction::sanitized::SanitizedTransaction, solana_transaction_error::TransactionError, solana_unified_scheduler_logic::NO_CONSUMED_BLOCK_SIZE, @@ -3881,7 +3882,14 @@ mod tests { let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); let (_banking_packet_sender, banking_packet_receiver) = crossbeam_channel::unbounded(); - let (exit, _poh_recorder, transaction_recorder, poh_service, _signal_receiver) = { + let ( + exit, + _poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = { // Create a dummy bank to prevent it from being frozen; otherwise, the following panic // will happen: // thread 'solPohTickProd' panicked at runtime/src/bank.rs:LL:CC: @@ -4012,7 +4020,14 @@ mod tests { let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); let (_banking_packet_sender, banking_packet_receiver) = crossbeam_channel::unbounded(); - let (exit, poh_recorder, transaction_recorder, poh_service, _signal_receiver) = { + let ( + exit, + _poh_recorder, + mut poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = { create_test_recorder_with_index_tracking( bank.clone(), blockstore.clone(), @@ -4020,10 +4035,9 @@ mod tests { Some(leader_schedule_cache), ) }; - poh_recorder - .write() - .unwrap() - .reset(bank.clone(), Some((bank.slot(), bank.slot() + 1))); + 
poh_controller + .reset_sync(bank.clone(), Some((bank.slot(), bank.slot() + 1))) + .unwrap(); pool.register_banking_stage( None, @@ -4045,10 +4059,9 @@ mod tests { let scheduler = pool.take_scheduler(context); let old_scheduler_id = scheduler.id(); let bank = BankWithScheduler::new(bank, Some(scheduler)); - poh_recorder - .write() - .unwrap() - .set_bank(bank.clone_with_scheduler()); + poh_controller + .set_bank_sync(bank.clone_with_scheduler()) + .unwrap(); bank.schedule_transaction_executions([(tx, ORIGINAL_TRANSACTION_INDEX)].into_iter()) .unwrap(); bank.unpause_new_block_production_scheduler(); @@ -4062,10 +4075,9 @@ mod tests { assert_eq!(bank.transaction_count(), 0); // Create new bank to observe behavior difference around session ending - poh_recorder - .write() - .unwrap() - .reset(bank.clone(), Some((bank.slot(), bank.slot() + 1))); + poh_controller + .reset_sync(bank.clone(), Some((bank.slot(), bank.slot() + 1))) + .unwrap(); let bank = Arc::new(Bank::new_from_parent( bank.clone_without_scheduler(), &Pubkey::default(), @@ -4081,10 +4093,9 @@ mod tests { // Make sure the same scheduler is used to test its internal cross-session behavior assert_eq!(scheduler.id(), old_scheduler_id); let bank = BankWithScheduler::new(bank, Some(scheduler)); - poh_recorder - .write() - .unwrap() - .set_bank(bank.clone_with_scheduler()); + poh_controller + .set_bank_sync(bank.clone_with_scheduler()) + .unwrap(); bank.unpause_new_block_production_scheduler(); // Calling wait_for_completed_scheduler() for block production scheduler causes it to be @@ -4527,13 +4538,19 @@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, poh_recorder, transaction_recorder, poh_service, signal_receiver) = - create_test_recorder_with_index_tracking( - bank.clone(), - blockstore.clone(), 
- None, - Some(leader_schedule_cache), - ); + let ( + exit, + poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + signal_receiver, + ) = create_test_recorder_with_index_tracking( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); let handler_context = &HandlerContext { thread_count: 0, log_messages_bytes_limit: None, @@ -4578,7 +4595,7 @@ mod tests { receiver.try_recv(), Ok(TransactionStatusMessage::Batch(( TransactionStatusBatch { .. }, - None, // no work sequence + None, // no work id ))) ); assert_matches!( @@ -4639,13 +4656,19 @@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, _poh_recorder, transaction_recorder, poh_service, _signal_receiver) = - create_test_recorder_with_index_tracking( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + _poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = create_test_recorder_with_index_tracking( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); pool.register_banking_stage( None, banking_packet_receiver, @@ -4708,13 +4731,19 @@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, _poh_recorder, transaction_recorder, poh_service, _signal_receiver) = - create_test_recorder_with_index_tracking( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + _poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = create_test_recorder_with_index_tracking( 
+ bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); // send fake packet batch to trigger banking_packet_handler let (banking_packet_sender, banking_packet_receiver) = crossbeam_channel::unbounded(); @@ -4782,13 +4811,19 @@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, _poh_recorder, transaction_recorder, poh_service, _signal_receiver) = - create_test_recorder_with_index_tracking( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + _poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = create_test_recorder_with_index_tracking( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); // Create a dummy handler which unconditionally sends tx0 back to the scheduler thread let tx0 = RuntimeTransaction::from_transaction_for_tests(system_transaction::transfer( @@ -4871,13 +4906,19 @@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, _poh_recorder, transaction_recorder, poh_service, _signal_receiver) = - create_test_recorder_with_index_tracking( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + _poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = create_test_recorder_with_index_tracking( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); let (_banking_packet_sender, banking_packet_receiver) = crossbeam_channel::unbounded(); pool.register_banking_stage( None, @@ -4931,13 +4972,19 
@@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, _poh_recorder, transaction_recorder, poh_service, _signal_receiver) = - create_test_recorder_with_index_tracking( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + _poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = create_test_recorder_with_index_tracking( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); let (_banking_packet_sender, banking_packet_receiver) = crossbeam_channel::unbounded(); pool.register_banking_stage( @@ -5017,13 +5064,19 @@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, _poh_recorder, transaction_recorder, poh_service, _signal_receiver) = - create_test_recorder_with_index_tracking( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + _poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = create_test_recorder_with_index_tracking( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); let (_banking_packet_sender, banking_packet_receiver) = crossbeam_channel::unbounded(); pool.register_banking_stage( @@ -5074,13 +5127,19 @@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, _poh_recorder, transaction_recorder, 
poh_service, _signal_receiver) = - create_test_recorder_with_index_tracking( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + _poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = create_test_recorder_with_index_tracking( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); pool.register_banking_stage( None, banking_packet_receiver, @@ -5168,13 +5227,19 @@ mod tests { let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); - let (exit, _poh_recorder, transaction_recorder, poh_service, _signal_receiver) = - create_test_recorder_with_index_tracking( - bank.clone(), - blockstore.clone(), - None, - Some(leader_schedule_cache), - ); + let ( + exit, + _poh_recorder, + _poh_controller, + transaction_recorder, + poh_service, + _signal_receiver, + ) = create_test_recorder_with_index_tracking( + bank.clone(), + blockstore.clone(), + None, + Some(leader_schedule_cache), + ); pool.register_banking_stage( None, banking_packet_receiver, diff --git a/upload-perf/.gitignore b/upload-perf/.gitignore deleted file mode 100644 index 5404b132dba6e1..00000000000000 --- a/upload-perf/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/target/ -/farf/ diff --git a/upload-perf/src/upload-perf.rs b/upload-perf/src/upload-perf.rs deleted file mode 100644 index 8117e12ef66b28..00000000000000 --- a/upload-perf/src/upload-perf.rs +++ /dev/null @@ -1,114 +0,0 @@ -use { - serde_json::Value, - std::{ - collections::HashMap, - env, - fs::File, - io::{BufRead, BufReader}, - process::Command, - }, -}; - -fn get_last_metrics(metric: &str, db: &str, name: &str, branch: &str) -> Result { - let query = format!( - r#"SELECT last("{metric}") FROM "{db}"."autogen"."{name}" WHERE "branch"='{branch}'"# - ); - - let 
response = solana_metrics::query(&query)?; - - match serde_json::from_str(&response) { - Result::Ok(v) => { - let v: Value = v; - let data = &v["results"][0]["series"][0]["values"][0][1]; - if data.is_null() { - return Result::Err("Key not found".to_string()); - } - Result::Ok(data.to_string()) - } - Result::Err(err) => Result::Err(err.to_string()), - } -} - -fn main() { - let args: Vec = env::args().collect(); - // Open the path in read-only mode, returns `io::Result` - let fname = &args[1]; - let file = match File::open(fname) { - Err(why) => panic!("couldn't open {fname}: {why:?}"), - Ok(file) => file, - }; - - let branch = &args[2]; - let upload_metrics = args.len() > 2; - - let git_output = Command::new("git") - .args(["rev-parse", "HEAD"]) - .output() - .expect("failed to execute git rev-parse"); - let git_commit_hash = String::from_utf8_lossy(&git_output.stdout); - let trimmed_hash = git_commit_hash.trim().to_string(); - - let mut last_commit = None; - let mut results = HashMap::new(); - - let db = env::var("INFLUX_DATABASE").unwrap_or_else(|_| "scratch".to_string()); - - for line in BufReader::new(file).lines() { - if let Ok(v) = serde_json::from_str(&line.unwrap()) { - let v: Value = v; - if v["type"] == "bench" { - let name = v["name"].as_str().unwrap().trim_matches('\"').to_string(); - - if last_commit.is_none() { - last_commit = get_last_metrics("commit", &db, &name, branch).ok(); - } - - let median: i64 = v["median"].to_string().parse().unwrap(); - let deviation: i64 = v["deviation"].to_string().parse().unwrap(); - assert!(!upload_metrics, "TODO"); - /* - solana_metrics::datapoint_info!( - &v["name"].as_str().unwrap().trim_matches('\"'), - ("test", "bench", String), - ("branch", branch.to_string(), String), - ("median", median, i64), - ("deviation", deviation, i64), - ("commit", git_commit_hash.trim().to_string(), String) - ); - */ - - let last_median = - get_last_metrics("median", &db, &name, branch).unwrap_or_default(); - let last_deviation = - 
get_last_metrics("deviation", &db, &name, branch).unwrap_or_default(); - - results.insert(name, (median, deviation, last_median, last_deviation)); - } - } - } - - if let Some(commit) = last_commit { - println!( - "Comparing current commits: {trimmed_hash} against baseline {commit} on {branch} branch" - ); - println!("bench_name, median, last_median, deviation, last_deviation"); - for (entry, values) in results { - println!( - "{}, {:#10?}, {:#10?}, {:#10?}, {:#10?}", - entry, - values.0, - values.2.parse::().unwrap_or_default(), - values.1, - values.3.parse::().unwrap_or_default(), - ); - } - } else { - println!("No previous results found for {branch} branch"); - println!("hash: {trimmed_hash}"); - println!("bench_name, median, deviation"); - for (entry, values) in results { - println!("{}, {:10?}, {:10?}", entry, values.0, values.1); - } - } - solana_metrics::flush(); -} diff --git a/validator/Cargo.toml b/validator/Cargo.toml index daf38861d60b37..0474480c2f6d11 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "agave-validator" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/agave-validator" default-run = "agave-validator" version = { workspace = true } authors = { workspace = true } +description = { workspace = true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -31,6 +31,7 @@ libc = { workspace = true } libloading = { workspace = true } log = { workspace = true } num_cpus = { workspace = true } +qualifier_attr = { workspace = true } rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } @@ -50,12 +51,12 @@ solana-epoch-schedule = { workspace = true } solana-faucet = { workspace = true } solana-genesis-utils = { workspace = true } solana-geyser-plugin-manager = { workspace = true } -solana-gossip = { workspace = true } +solana-gossip = { workspace = true, features = ["agave-unstable-api"] } 
solana-hash = { workspace = true } solana-inflation = { workspace = true } solana-keypair = { workspace = true } solana-ledger = { workspace = true } -solana-logger = "=2.3.1" +solana-logger = "=3.0.0" solana-metrics = { workspace = true } solana-native-token = { workspace = true } solana-net-utils = { workspace = true } @@ -98,7 +99,9 @@ signal-hook = { workspace = true } assert_cmd = { workspace = true } predicates = { workspace = true } pretty_assertions = { workspace = true } +scopeguard = { workspace = true } solana-account-decoder = { workspace = true } +solana-core = { workspace = true, features = ["dev-context-only-utils"] } solana-program-option = { workspace = true } solana-program-pack = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index ff271bb2b9993a..e3f251fa0c5601 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -11,14 +11,14 @@ use { solana_accounts_db::accounts_index::AccountIndex, solana_core::{ admin_rpc_post_init::AdminRpcRequestMetadataPostInit, + banking_stage::BankingStage, consensus::{tower_storage::TowerStorage, Tower}, repair::repair_service, - validator::ValidatorStartProgress, + validator::{BlockProductionMethod, TransactionStructure, ValidatorStartProgress}, }, solana_geyser_plugin_manager::GeyserPluginManagerRequest, solana_gossip::contact_info::{ContactInfo, Protocol, SOCKET_ADDR_UNSPECIFIED}, solana_keypair::{read_keypair_file, Keypair}, - solana_net_utils::sockets::bind_to, solana_pubkey::Pubkey, solana_rpc::rpc::verify_pubkey, solana_rpc_client_api::{config::RpcAccountIndex, custom_error::RpcCustomError}, @@ -29,6 +29,7 @@ use { env, error, fmt::{self, Display}, net::{IpAddr, SocketAddr}, + num::NonZeroUsize, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, Ordering}, @@ -222,8 +223,8 @@ pub trait AdminRpc { #[rpc(meta, name = "contactInfo")] fn 
contact_info(&self, meta: Self::Metadata) -> Result; - #[rpc(meta, name = "setGossipSocket")] - fn set_gossip_socket(&self, meta: Self::Metadata, ip: String, port: u16) -> Result<()>; + #[rpc(meta, name = "selectActiveInterface")] + fn select_active_interface(&self, meta: Self::Metadata, interface: IpAddr) -> Result<()>; #[rpc(meta, name = "repairShredFromPeer")] fn repair_shred_from_peer( @@ -260,6 +261,15 @@ pub trait AdminRpc { meta: Self::Metadata, public_tpu_forwards_addr: SocketAddr, ) -> Result<()>; + + #[rpc(meta, name = "manageBlockProduction")] + fn manage_block_production( + &self, + meta: Self::Metadata, + block_production_method: BlockProductionMethod, + transaction_struct: TransactionStructure, + num_workers: NonZeroUsize, + ) -> Result<()>; } pub struct AdminRpcImpl; @@ -477,7 +487,7 @@ impl AdminRpc for AdminRpcImpl { ) -> Result<()> { debug!("add_authorized_voter_from_bytes request received"); - let authorized_voter = Keypair::from_bytes(&keypair).map_err(|err| { + let authorized_voter = Keypair::try_from(keypair.as_ref()).map_err(|err| { jsonrpc_core::error::Error::invalid_params(format!( "Failed to read authorized voter keypair from provided byte array: {err}" )) @@ -517,7 +527,7 @@ impl AdminRpc for AdminRpcImpl { ) -> Result<()> { debug!("set_identity_from_bytes request received"); - let identity_keypair = Keypair::from_bytes(&identity_keypair).map_err(|err| { + let identity_keypair = Keypair::try_from(identity_keypair.as_ref()).map_err(|err| { jsonrpc_core::error::Error::invalid_params(format!( "Failed to read identity keypair from provided byte array: {err}" )) @@ -539,8 +549,8 @@ impl AdminRpc for AdminRpcImpl { let mut write_staked_nodes = meta.staked_nodes_overrides.write().unwrap(); write_staked_nodes.clear(); write_staked_nodes.extend(loaded_config); - info!("Staked nodes overrides loaded from {}", path); - debug!("overrides map: {:?}", write_staked_nodes); + info!("Staked nodes overrides loaded from {path}"); + debug!("overrides map: 
{write_staked_nodes:?}"); Ok(()) } @@ -548,31 +558,20 @@ impl AdminRpc for AdminRpcImpl { meta.with_post_init(|post_init| Ok(post_init.cluster_info.my_contact_info().into())) } - fn set_gossip_socket(&self, meta: Self::Metadata, ip: String, port: u16) -> Result<()> { - let ip: IpAddr = ip - .parse() - .map_err(|e| jsonrpc_core::Error::invalid_params(format!("Invalid IP address: {e}")))?; - let new_addr = SocketAddr::new(ip, port); - + fn select_active_interface(&self, meta: Self::Metadata, interface: IpAddr) -> Result<()> { + debug!("select_active_interface received: {interface}"); meta.with_post_init(|post_init| { - if let Some(socket) = &post_init.gossip_socket { - let new_socket = bind_to(new_addr.ip(), new_addr.port()).map_err(|e| { - jsonrpc_core::Error::invalid_params(format!("Gossip socket rebind failed: {e}")) + let node = post_init.node.as_ref().ok_or_else(|| { + jsonrpc_core::Error::invalid_params("`Node` not initialized in post_init") + })?; + + node.switch_active_interface(interface, &post_init.cluster_info) + .map_err(|e| { + jsonrpc_core::Error::invalid_params(format!( + "Switching failed due to error {e}" + )) })?; - - // hot-swap new socket - socket.swap(new_socket); - - // update gossip socket in cluster info - post_init - .cluster_info - .set_gossip_socket(new_addr) - .map_err(|e| { - jsonrpc_core::Error::invalid_params(format!( - "Failed to refresh gossip ContactInfo: {e}" - )) - })?; - } + info!("Switched primary interface to {interface}"); Ok(()) }) } @@ -634,10 +633,7 @@ impl AdminRpc for AdminRpcImpl { meta: Self::Metadata, pubkey_str: String, ) -> Result> { - debug!( - "get_secondary_index_key_size rpc request received: {:?}", - pubkey_str - ); + debug!("get_secondary_index_key_size rpc request received: {pubkey_str:?}"); let index_key = verify_pubkey(&pubkey_str)?; meta.with_post_init(|post_init| { let bank = post_init.bank_forks.read().unwrap().root_bank(); @@ -754,6 +750,41 @@ impl AdminRpc for AdminRpcImpl { Ok(()) }) } + + fn 
manage_block_production( + &self, + meta: Self::Metadata, + block_production_method: BlockProductionMethod, + transaction_struct: TransactionStructure, + num_workers: NonZeroUsize, + ) -> Result<()> { + debug!("manage_block_production rpc request received"); + + if num_workers > BankingStage::max_num_workers() { + return Err(jsonrpc_core::error::Error::invalid_params(format!( + "Number of workers ({}) exceeds maximum allowed ({})", + num_workers, + BankingStage::max_num_workers() + ))); + } + + meta.with_post_init(|post_init| { + let mut banking_stage = post_init.banking_stage.write().unwrap(); + let Some(banking_stage) = banking_stage.as_mut() else { + error!("banking stage is not initialized"); + return Err(jsonrpc_core::error::Error::internal_error()); + }; + + banking_stage + .spawn_threads(transaction_struct, block_production_method, num_workers) + .map_err(|err| { + error!("Failed to spawn new non-vote threads: {err:?}"); + jsonrpc_core::error::Error::internal_error() + })?; + + Ok(()) + }) + } } impl AdminRpcImpl { @@ -843,7 +874,7 @@ pub fn run(ledger_path: &Path, metadata: AdminRpcRequestMetadata) { match server { Err(err) => { - warn!("Unable to start admin rpc service: {:?}", err); + warn!("Unable to start admin rpc service: {err:?}"); } Ok(server) => { info!("started admin rpc service!"); @@ -930,7 +961,7 @@ where pub fn load_staked_nodes_overrides( path: &String, ) -> std::result::Result> { - debug!("Loading staked nodes overrides configuration from {}", path); + debug!("Loading staked nodes overrides configuration from {path}"); if Path::new(&path).exists() { let file = std::fs::File::open(path)?; Ok(serde_yaml::from_reader(file)?) 
@@ -1037,9 +1068,10 @@ mod tests { RwLock, >::default(), cluster_slots: Arc::new( - solana_core::cluster_slots_service::cluster_slots::ClusterSlots::default(), + solana_core::cluster_slots_service::cluster_slots::ClusterSlots::default_for_tests(), ), - gossip_socket: None, + node: None, + banking_stage: Arc::new(RwLock::new(None)), }))), staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())), rpc_to_plugin_manager_sender: None, diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 1b51eed38ae2af..059c5399d5de13 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -16,11 +16,11 @@ use { solana_clock::Slot, solana_core::consensus::tower_storage::FileTowerStorage, solana_epoch_schedule::EpochSchedule, - solana_faucet::faucet::run_local_faucet_with_port, + solana_faucet::faucet::{run_faucet, Faucet}, solana_inflation::Inflation, solana_keypair::{read_keypair_file, write_keypair_file, Keypair}, solana_logger::redirect_stderr_to_file, - solana_native_token::sol_to_lamports, + solana_native_token::sol_str_to_lamports, solana_pubkey::Pubkey, solana_rent::Rent, solana_rpc::{ @@ -38,7 +38,8 @@ use { net::{IpAddr, Ipv4Addr, SocketAddr}, path::{Path, PathBuf}, process::exit, - sync::{Arc, RwLock}, + sync::{Arc, Mutex, RwLock}, + thread, time::{Duration, SystemTime, UNIX_EPOCH}, }, }; @@ -324,7 +325,10 @@ fn main() { None }; - let faucet_lamports = sol_to_lamports(value_of(&matches, "faucet_sol").unwrap()); + let faucet_lamports = matches + .value_of("faucet_sol") + .and_then(sol_str_to_lamports) + .unwrap(); let faucet_keypair_file = ledger_path.join("faucet-keypair.json"); if !faucet_keypair_file.exists() { write_keypair_file(&Keypair::new(), faucet_keypair_file.to_str().unwrap()).unwrap_or_else( @@ -351,22 +355,27 @@ fn main() { let faucet_pubkey = faucet_keypair.pubkey(); let faucet_time_slice_secs = value_t_or_exit!(matches, "faucet_time_slice_secs", u64); - 
let faucet_per_time_cap = value_t!(matches, "faucet_per_time_sol_cap", f64) - .ok() - .map(sol_to_lamports); - let faucet_per_request_cap = value_t!(matches, "faucet_per_request_sol_cap", f64) - .ok() - .map(sol_to_lamports); + let faucet_per_time_cap = matches + .value_of("faucet_per_time_sol_cap") + .and_then(sol_str_to_lamports); + let faucet_per_request_cap = matches + .value_of("faucet_per_request_sol_cap") + .and_then(sol_str_to_lamports); let (sender, receiver) = unbounded(); - run_local_faucet_with_port( - faucet_keypair, - sender, - Some(faucet_time_slice_secs), - faucet_per_time_cap, - faucet_per_request_cap, - faucet_addr.port(), - ); + thread::spawn(move || { + let faucet = Arc::new(Mutex::new(Faucet::new( + faucet_keypair, + Some(faucet_time_slice_secs), + faucet_per_time_cap, + faucet_per_request_cap, + ))); + let runtime = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + runtime.block_on(run_faucet(faucet, faucet_addr, Some(sender))); + }); let _ = receiver.recv().expect("run faucet").unwrap_or_else(|err| { println!("Error: failed to start faucet: {err}"); exit(1); diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index 0852c028547ff8..712ee2569dd17d 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -25,10 +25,10 @@ use { snapshot_utils, }, solana_signer::Signer, - solana_streamer::{atomic_udp_socket::AtomicUdpSocket, socket::SocketAddrSpace}, + solana_streamer::socket::SocketAddrSpace, std::{ collections::{hash_map::RandomState, HashMap, HashSet}, - net::{SocketAddr, TcpListener, TcpStream}, + net::{SocketAddr, TcpListener, TcpStream, UdpSocket}, path::Path, process::exit, sync::{ @@ -79,8 +79,8 @@ fn verify_reachable_ports( .unwrap_or_default() }; - let gossip_socket = node.sockets.gossip.load(); - let mut udp_sockets = vec![&gossip_socket, &node.sockets.repair]; + let mut udp_sockets = vec![&node.sockets.repair]; + udp_sockets.extend(node.sockets.gossip.iter()); if 
verify_address(&node.info.serve_repair(Protocol::UDP)) { udp_sockets.push(&node.sockets.serve_repair); @@ -144,7 +144,7 @@ fn start_gossip_node( cluster_entrypoints: &[ContactInfo], ledger_path: &Path, gossip_addr: &SocketAddr, - gossip_socket: AtomicUdpSocket, + gossip_sockets: Arc<[UdpSocket]>, expected_shred_version: u16, gossip_validators: Option>, should_check_duplicate_instance: bool, @@ -164,7 +164,7 @@ fn start_gossip_node( let gossip_service = GossipService::new( &cluster_info, None, - gossip_socket, + gossip_sockets, gossip_validators, should_check_duplicate_instance, None, @@ -193,12 +193,7 @@ fn get_rpc_peers( .unwrap_or_default() ); - let mut rpc_peers = cluster_info - .all_rpc_peers() - .into_iter() - .filter(|contact_info| contact_info.shred_version() == shred_version) - .collect::>(); - + let mut rpc_peers = cluster_info.rpc_peers(); if bootstrap_config.only_known_rpc { rpc_peers.retain(|rpc_peer| { is_known_validator(rpc_peer.pubkey(), &validator_config.known_validators) @@ -470,8 +465,8 @@ fn get_vetted_rpc_nodes( Err(err) => { error!( "Failed to get RPC nodes: {err}. Consider checking system clock, removing \ - `--no-port-check`, or adjusting `--known-validator ...` arguments as \ - applicable" + `--no-port-check`, or adjusting `--known-validator ...` arguments as \ + applicable" ); exit(1); } @@ -958,8 +953,7 @@ fn build_known_snapshot_hashes<'a>( if is_any_same_slot_and_different_hash(&full_snapshot_hash, known_snapshot_hashes.keys()) { warn!( "Ignoring all snapshot hashes from node {node} since we've seen a different full \ - snapshot hash with this slot.\ - \nfull snapshot hash: {full_snapshot_hash:?}" + snapshot hash with this slot. 
full snapshot hash: {full_snapshot_hash:?}" ); debug!( "known full snapshot hashes: {:#?}", @@ -985,9 +979,9 @@ fn build_known_snapshot_hashes<'a>( ) { warn!( "Ignoring incremental snapshot hash from node {node} since we've seen a \ - different incremental snapshot hash with this slot.\ - \nfull snapshot hash: {full_snapshot_hash:?}\ - \nincremental snapshot hash: {incremental_snapshot_hash:?}" + different incremental snapshot hash with this slot. full snapshot hash: \ + {full_snapshot_hash:?}, incremental snapshot hash: \ + {incremental_snapshot_hash:?}" ); debug!( "known incremental snapshot hashes based on this slot: {:#?}", diff --git a/validator/src/cli.rs b/validator/src/cli.rs index eed8ac009ed638..ba0de96d441224 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1,7 +1,6 @@ use { crate::commands, clap::{crate_description, crate_name, App, AppSettings, Arg, ArgMatches, SubCommand}, - log::warn, solana_accounts_db::{ accounts_db::{ DEFAULT_ACCOUNTS_SHRINK_OPTIMIZE_TOTAL_SPACE, DEFAULT_ACCOUNTS_SHRINK_RATIO, @@ -21,8 +20,7 @@ use { solana_hash::Hash, solana_net_utils::{MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}, solana_quic_definitions::QUIC_PORT_OFFSET, - solana_rayon_threadlimit::get_thread_count, - solana_rpc::{rpc::MAX_REQUEST_BODY_SIZE, rpc_pubsub_service::PubSubConfig}, + solana_rpc::rpc::MAX_REQUEST_BODY_SIZE, solana_rpc_client_api::request::{DELINQUENT_VALIDATOR_SLOT_DISTANCE, MAX_MULTIPLE_ACCOUNTS}, solana_runtime::snapshot_utils::{ SnapshotVersion, DEFAULT_ARCHIVE_COMPRESSION, DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, @@ -74,7 +72,8 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .subcommand(commands::set_log_filter::command()) .subcommand(commands::staked_nodes_overrides::command()) .subcommand(commands::wait_for_restart_window::command()) - .subcommand(commands::set_public_address::command()); + .subcommand(commands::set_public_address::command()) + 
.subcommand(commands::manage_block_production::command()); commands::run::add_args(app, default_args) .args(&thread_args(&default_args.thread_args)) @@ -126,6 +125,45 @@ fn deprecated_arguments() -> Vec { (@into-option $v:expr) => { Some($v) }; } + add_arg!( + // deprecated in v3.0.0 + Arg::with_name("accounts_db_clean_threads") + .long("accounts-db-clean-threads") + .takes_value(true) + .value_name("NUMBER") + .conflicts_with("accounts_db_background_threads"), + replaced_by: "accounts-db-background-threads", + ); + add_arg!( + // deprecated in v3.1.0 + Arg::with_name("accounts_db_hash_threads") + .long("accounts-db-hash-threads") + .takes_value(true) + .value_name("NUMBER"), + usage_warning: "There is no more startup background accounts hash calculation", + ); + add_arg!( + // deprecated in v3.0.0 + Arg::with_name("accounts_db_read_cache_limit_mb") + .long("accounts-db-read-cache-limit-mb") + .value_name("MAX | LOW,HIGH") + .takes_value(true) + .min_values(1) + .max_values(2) + .multiple(false) + .require_delimiter(true) + .help("How large the read cache for account data can become, in mebibytes") + .long_help( + "How large the read cache for account data can become, in mebibytes. \ + If given a single value, it will be the maximum size for the cache. \ + If given a pair of values, they will be the low and high watermarks \ + for the cache. When the cache exceeds the high watermark, entries will \ + be evicted until the size reaches the low watermark." 
+ ) + .hidden(hidden_unless_forced()) + .conflicts_with("accounts_db_read_cache_limit"), + replaced_by: "accounts-db-read-cache-limit", + ); add_arg!( // deprecated in v3.0.0 Arg::with_name("accounts_hash_cache_path") @@ -142,6 +180,32 @@ fn deprecated_arguments() -> Vec { .long("disable-accounts-disk-index") .help("Disable the disk-based accounts index if it is enabled by default.")); + add_arg!( + // deprecated in v3.0.0 + Arg::with_name("gossip_host") + .long("gossip-host") + .value_name("HOST") + .takes_value(true) + .validator(solana_net_utils::is_host), + replaced_by : "bind-address", + usage_warning:"Use --bind-address instead", + ); + add_arg!( + // deprecated in v3.0.0 + Arg::with_name("tpu_disable_quic") + .long("tpu-disable-quic") + .takes_value(false) + .help("Do not use QUIC to send transactions."), + usage_warning: "UDP support will be dropped" + ); + add_arg!( + // deprecated in v3.0.0 + Arg::with_name("tpu_enable_udp") + .long("tpu-enable-udp") + .takes_value(false) + .help("Enable UDP for receiving/sending transactions."), + usage_warning: "UDP support will be dropped" + ); res } @@ -177,7 +241,8 @@ pub fn warn_for_deprecated_arguments(matches: &ArgMatches) { msg.push('.'); } } - warn!("{}", msg); + // this can not rely on logger since it is not initialized at the time of call + eprintln!("{msg}"); } } } @@ -194,9 +259,6 @@ pub struct DefaultArgs { pub send_transaction_service_config: send_transaction_service::Config, pub rpc_max_multiple_accounts: String, - pub rpc_pubsub_max_active_subscriptions: String, - pub rpc_pubsub_queue_capacity_items: String, - pub rpc_pubsub_queue_capacity_bytes: String, pub rpc_send_transaction_retry_ms: String, pub rpc_send_transaction_batch_ms: String, pub rpc_send_transaction_leader_forward_count: String, @@ -211,8 +273,6 @@ pub struct DefaultArgs { pub rpc_bigtable_app_profile_id: String, pub rpc_bigtable_max_message_size: String, pub rpc_max_request_body_size: String, - pub rpc_pubsub_worker_threads: String, - pub 
rpc_pubsub_notification_threads: String, pub maximum_local_snapshot_age: String, pub maximum_full_snapshot_archives_to_retain: String, @@ -269,15 +329,6 @@ impl DefaultArgs { health_check_slot_distance: DELINQUENT_VALIDATOR_SLOT_DISTANCE.to_string(), tower_storage: "file".to_string(), etcd_domain_name: "localhost".to_string(), - rpc_pubsub_max_active_subscriptions: PubSubConfig::default() - .max_active_subscriptions - .to_string(), - rpc_pubsub_queue_capacity_items: PubSubConfig::default() - .queue_capacity_items - .to_string(), - rpc_pubsub_queue_capacity_bytes: PubSubConfig::default() - .queue_capacity_bytes - .to_string(), send_transaction_service_config: send_transaction_service::Config::default(), rpc_send_transaction_retry_ms: default_send_transaction_service_config .retry_rate_ms @@ -306,8 +357,6 @@ impl DefaultArgs { .to_string(), rpc_bigtable_max_message_size: solana_storage_bigtable::DEFAULT_MAX_MESSAGE_SIZE .to_string(), - rpc_pubsub_worker_threads: "4".to_string(), - rpc_pubsub_notification_threads: get_thread_count().to_string(), maximum_full_snapshot_archives_to_retain: DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN .to_string(), maximum_incremental_snapshot_archives_to_retain: @@ -415,8 +464,8 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .takes_value(true) .validator(is_url_or_moniker) .help( - "URL for Solana's JSON RPC or moniker (or their first letter): \ - [mainnet-beta, testnet, devnet, localhost]", + "URL for Solana's JSON RPC or moniker (or their first letter): [mainnet-beta, \ + testnet, devnet, localhost]", ), ) .arg( @@ -427,8 +476,8 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .takes_value(true) .help( "Address of the mint account that will receive tokens created at genesis. 
If \ - the ledger already exists then this parameter is silently ignored \ - [default: client keypair]", + the ledger already exists then this parameter is silently ignored [default: \ + client keypair]", ), ) .arg( @@ -659,15 +708,15 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .parse::() .map_err(|err| format!("error parsing '{value}': {err}")) .and_then(|rate| match rate.partial_cmp(&0.0) { - Some(Ordering::Greater) | Some(Ordering::Equal) => Ok(()), - Some(Ordering::Less) | None => Err(String::from("value must be >= 0")), + Some(Ordering::Greater) | Some(Ordering::Equal) => Ok(()), + Some(Ordering::Less) | None => Err(String::from("value must be >= 0")), }) }) .takes_value(true) .allow_hyphen_values(true) .help( - "Override default inflation with fixed rate. If the ledger already exists then \ - this parameter is silently ignored", + "Override default inflation with fixed rate. If the ledger already exists \ + then this parameter is silently ignored", ), ) .arg( @@ -677,15 +726,6 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .takes_value(true) .help("Gossip port number for the validator"), ) - .arg( - Arg::with_name("gossip_host") - .long("gossip-host") - .value_name("HOST") - .takes_value(true) - .validator(solana_net_utils::is_host) - .hidden(hidden_unless_forced()) - .help("DEPRECATED: Use --bind-address instead."), - ) .arg( Arg::with_name("dynamic_port_range") .long("dynamic-port-range") @@ -701,7 +741,18 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .takes_value(true) .validator(solana_net_utils::is_host) .default_value("127.0.0.1") - .help("IP address to bind the validator ports [default: 127.0.0.1]"), + .help( + "IP address to bind the validator ports. Can be repeated. The first \ + --bind-address MUST be your public internet address. ALL protocols (gossip, \ + repair, IP echo, TVU, TPU, etc.) bind to this address on startup. 
Additional \ + --bind-address values enable multihoming for Gossip/TVU/TPU - these \ + protocols bind to ALL interfaces on startup. Gossip reads/sends from one \ + interface at a time. TVU/TPU read from ALL interfaces simultaneously but \ + send from only one interface at a time. When switching interfaces via \ + AdminRPC: Gossip switches to send/receive from the new interface, while \ + TVU/TPU continue receiving from ALL interfaces but send from the new \ + interface only.", + ), ) .arg( Arg::with_name("clone_account") @@ -726,9 +777,9 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .multiple(true) .requires("json_rpc_url") .help( - "Copy an address lookup table and all accounts it references from the cluster referenced by the --url \ - argument in the genesis configuration. If the ledger already exists then this \ - parameter is silently ignored", + "Copy an address lookup table and all accounts it references from the cluster \ + referenced by the --url argument in the genesis configuration. If the ledger \ + already exists then this parameter is silently ignored", ), ) .arg( @@ -869,9 +920,9 @@ pub fn test_app<'a>(version: &'a str, default_args: &'a DefaultTestArgs) -> App< .takes_value(false) .requires("json_rpc_url") .help( - "Copy a feature set from the cluster referenced by the --url \ - argument in the genesis configuration. If the ledger \ - already exists then this parameter is silently ignored", + "Copy a feature set from the cluster referenced by the --url argument in the \ + genesis configuration. 
If the ledger already exists then this parameter is \ + silently ignored", ), ) } diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs index b1fb47a9d69227..6b24eba60103f0 100644 --- a/validator/src/cli/thread_args.rs +++ b/validator/src/cli/thread_args.rs @@ -4,16 +4,17 @@ use { clap::{value_t_or_exit, Arg, ArgMatches}, solana_accounts_db::{accounts_db, accounts_index}, solana_clap_utils::{hidden_unless_forced, input_validators::is_within_range}, + solana_core::banking_stage::BankingStage, solana_rayon_threadlimit::get_thread_count, std::{num::NonZeroUsize, ops::RangeInclusive}, }; // Need this struct to provide &str whose lifetime matches that of the CLAP Arg's pub struct DefaultThreadArgs { - pub accounts_db_clean_threads: String, + pub accounts_db_background_threads: String, pub accounts_db_foreground_threads: String, - pub accounts_db_hash_threads: String, pub accounts_index_flush_threads: String, + pub block_production_num_workers: String, pub ip_echo_server_threads: String, pub rayon_global_threads: String, pub replay_forks_threads: String, @@ -31,12 +32,13 @@ pub struct DefaultThreadArgs { impl Default for DefaultThreadArgs { fn default() -> Self { Self { - accounts_db_clean_threads: AccountsDbCleanThreadsArg::bounded_default().to_string(), + accounts_db_background_threads: AccountsDbBackgroundThreadsArg::bounded_default() + .to_string(), accounts_db_foreground_threads: AccountsDbForegroundThreadsArg::bounded_default() .to_string(), - accounts_db_hash_threads: AccountsDbHashThreadsArg::bounded_default().to_string(), accounts_index_flush_threads: AccountsIndexFlushThreadsArg::bounded_default() .to_string(), + block_production_num_workers: BankingStage::default_num_workers().to_string(), ip_echo_server_threads: IpEchoServerThreadsArg::bounded_default().to_string(), rayon_global_threads: RayonGlobalThreadsArg::bounded_default().to_string(), replay_forks_threads: ReplayForksThreadsArg::bounded_default().to_string(), @@ -59,10 +61,10 
@@ impl Default for DefaultThreadArgs { pub fn thread_args<'a>(defaults: &DefaultThreadArgs) -> Vec> { vec![ - new_thread_arg::(&defaults.accounts_db_clean_threads), + new_thread_arg::(&defaults.accounts_db_background_threads), new_thread_arg::(&defaults.accounts_db_foreground_threads), - new_thread_arg::(&defaults.accounts_db_hash_threads), new_thread_arg::(&defaults.accounts_index_flush_threads), + new_thread_arg::(&defaults.block_production_num_workers), new_thread_arg::(&defaults.ip_echo_server_threads), new_thread_arg::(&defaults.rayon_global_threads), new_thread_arg::(&defaults.replay_forks_threads), @@ -94,10 +96,10 @@ fn new_thread_arg<'a, T: ThreadArg>(default: &str) -> Arg<'_, 'a> { } pub struct NumThreadConfig { - pub accounts_db_clean_threads: NonZeroUsize, + pub accounts_db_background_threads: NonZeroUsize, pub accounts_db_foreground_threads: NonZeroUsize, - pub accounts_db_hash_threads: NonZeroUsize, pub accounts_index_flush_threads: NonZeroUsize, + pub block_production_num_workers: NonZeroUsize, pub ip_echo_server_threads: NonZeroUsize, pub rayon_global_threads: NonZeroUsize, pub replay_forks_threads: NonZeroUsize, @@ -111,25 +113,28 @@ pub struct NumThreadConfig { } pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig { + let accounts_db_background_threads = { + if matches.is_present("accounts_db_clean_threads") { + value_t_or_exit!(matches, "accounts_db_clean_threads", NonZeroUsize) + } else { + value_t_or_exit!(matches, AccountsDbBackgroundThreadsArg::NAME, NonZeroUsize) + } + }; NumThreadConfig { - accounts_db_clean_threads: value_t_or_exit!( - matches, - AccountsDbCleanThreadsArg::NAME, - NonZeroUsize - ), + accounts_db_background_threads, accounts_db_foreground_threads: value_t_or_exit!( matches, AccountsDbForegroundThreadsArg::NAME, NonZeroUsize ), - accounts_db_hash_threads: value_t_or_exit!( + accounts_index_flush_threads: value_t_or_exit!( matches, - AccountsDbHashThreadsArg::NAME, + AccountsIndexFlushThreadsArg::NAME, 
NonZeroUsize ), - accounts_index_flush_threads: value_t_or_exit!( + block_production_num_workers: value_t_or_exit!( matches, - AccountsIndexFlushThreadsArg::NAME, + BlockProductionNumWorkersArg::NAME, NonZeroUsize ), ip_echo_server_threads: value_t_or_exit!( @@ -205,11 +210,11 @@ pub trait ThreadArg { } } -struct AccountsDbCleanThreadsArg; -impl ThreadArg for AccountsDbCleanThreadsArg { - const NAME: &'static str = "accounts_db_clean_threads"; - const LONG_NAME: &'static str = "accounts-db-clean-threads"; - const HELP: &'static str = "Number of threads to use for cleaning AccountsDb"; +struct AccountsDbBackgroundThreadsArg; +impl ThreadArg for AccountsDbBackgroundThreadsArg { + const NAME: &'static str = "accounts_db_background_threads"; + const LONG_NAME: &'static str = "accounts-db-background-threads"; + const HELP: &'static str = "Number of threads to use for AccountsDb background tasks"; fn default() -> usize { accounts_db::quarter_thread_count() @@ -220,24 +225,14 @@ struct AccountsDbForegroundThreadsArg; impl ThreadArg for AccountsDbForegroundThreadsArg { const NAME: &'static str = "accounts_db_foreground_threads"; const LONG_NAME: &'static str = "accounts-db-foreground-threads"; - const HELP: &'static str = "Number of threads to use for AccountsDb block processing"; + const HELP: &'static str = + "Number of threads to use for AccountsDb foreground tasks, e.g. 
transaction processing"; fn default() -> usize { accounts_db::default_num_foreground_threads() } } -struct AccountsDbHashThreadsArg; -impl ThreadArg for AccountsDbHashThreadsArg { - const NAME: &'static str = "accounts_db_hash_threads"; - const LONG_NAME: &'static str = "accounts-db-hash-threads"; - const HELP: &'static str = "Number of threads to use for background accounts hashing"; - - fn default() -> usize { - accounts_db::default_num_hash_threads().get() - } -} - struct AccountsIndexFlushThreadsArg; impl ThreadArg for AccountsIndexFlushThreadsArg { const NAME: &'static str = "accounts_index_flush_threads"; @@ -249,6 +244,25 @@ impl ThreadArg for AccountsIndexFlushThreadsArg { } } +struct BlockProductionNumWorkersArg; +impl ThreadArg for BlockProductionNumWorkersArg { + const NAME: &'static str = "block_production_num_workers"; + const LONG_NAME: &'static str = "block-production-num-workers"; + const HELP: &'static str = "Number of worker threads to use for block production"; + + fn default() -> usize { + BankingStage::default_num_workers().get() + } + + fn min() -> usize { + 1 + } + + fn max() -> usize { + BankingStage::max_num_workers().get() + } +} + struct IpEchoServerThreadsArg; impl ThreadArg for IpEchoServerThreadsArg { const NAME: &'static str = "ip_echo_server_threads"; diff --git a/validator/src/commands/authorized_voter/mod.rs b/validator/src/commands/authorized_voter/mod.rs index bdbd67d58af9cb..83b13072dab3f2 100644 --- a/validator/src/commands/authorized_voter/mod.rs +++ b/validator/src/commands/authorized_voter/mod.rs @@ -42,11 +42,13 @@ pub fn command<'a>() -> App<'a, 'a> { .takes_value(true) .validator(is_keypair) .help( - "Path to keypair of the authorized voter to add [default: read JSON keypair from stdin]", + "Path to keypair of the authorized voter to add [default: read JSON \ + keypair from stdin]", ), ) .after_help( - "Note: the new authorized voter only applies to the currently running validator instance", + "Note: the new authorized 
voter only applies to the currently running \ + validator instance", ), ) .subcommand( diff --git a/validator/src/commands/exit/mod.rs b/validator/src/commands/exit/mod.rs index cdccfed4a7907c..3cc3817917b77f 100644 --- a/validator/src/commands/exit/mod.rs +++ b/validator/src/commands/exit/mod.rs @@ -6,7 +6,10 @@ use { commands::{monitor, wait_for_restart_window, Error, FromClapArgMatches, Result}, }, clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand}, - solana_clap_utils::input_validators::{is_parsable, is_valid_percentage}, + solana_clap_utils::{ + hidden_unless_forced, + input_validators::{is_parsable, is_valid_percentage}, + }, std::path::Path, }; @@ -37,12 +40,27 @@ impl FromClapArgMatches for ExitArgs { fn from_clap_arg_match(matches: &ArgMatches) -> Result { let post_exit_action = if matches.is_present("monitor") { Some(PostExitAction::Monitor) - } else if matches.is_present("wait_for_exit") { - Some(PostExitAction::Wait) - } else { + } else if matches.is_present("no_wait_for_exit") { None + } else { + Some(PostExitAction::Wait) }; + // Deprecated in v3.0.0 + if matches.is_present("wait_for_exit") { + eprintln!( + "WARN: The --wait-for-exit flag has been deprecated, waiting for exit is now the \ + default behavior" + ); + } + // Deprecated in v3.1.0 + if matches.is_present("monitor") { + eprintln!( + "WARN: The --monitor flag has been deprecated, use \"agave-validator monitor\" \ + instead" + ); + } + Ok(ExitArgs { force: matches.is_present("force"), post_exit_action, @@ -63,7 +81,8 @@ pub fn command<'a>() -> App<'a, 'a> { .long("force") .takes_value(false) .help( - "Request the validator exit immediately instead of waiting for a restart window", + "Request the validator exit immediately instead of waiting for a restart \ + window", ), ) .arg( @@ -71,14 +90,24 @@ pub fn command<'a>() -> App<'a, 'a> { .short("m") .long("monitor") .takes_value(false) + .requires("no_wait_for_exit") + .hidden(hidden_unless_forced()) .help("Monitor the validator after 
sending the exit request"), ) .arg( Arg::with_name("wait_for_exit") .long("wait-for-exit") .conflicts_with("monitor") + .hidden(hidden_unless_forced()) .help("Wait for the validator to terminate after sending the exit request"), ) + .arg( + Arg::with_name("no_wait_for_exit") + .long("no-wait-for-exit") + .takes_value(false) + .conflicts_with("wait_for_exit") + .help("Do not wait for the validator to terminate after sending the exit request"), + ) .arg( Arg::with_name("min_idle_time") .long("min-idle-time") @@ -86,9 +115,7 @@ pub fn command<'a>() -> App<'a, 'a> { .validator(is_parsable::) .value_name("MINUTES") .default_value(DEFAULT_MIN_IDLE_TIME) - .help( - "Minimum time that the validator should not be leader before restarting", - ), + .help("Minimum time that the validator should not be leader before restarting"), ) .arg( Arg::with_name("max_delinquent_stake") @@ -131,11 +158,9 @@ pub fn execute(matches: &ArgMatches, ledger_path: &Path) -> Result<()> { // Additionally, only check the pid() RPC call result if it will be used. // In an upgrade scenario, it is possible that a binary that calls pid() // will be initating exit against a process that doesn't support pid(). - // Since PostExitAction::Wait case is opt-in (via --wait-for-exit), the - // result is checked ONLY in that case to provide a friendlier upgrade - // path for users who are NOT using --wait-for-exit - const WAIT_FOR_EXIT_UNSUPPORTED_ERROR: &str = - "remote process exit cannot be waited on. `--wait-for-exit` is not supported by the remote process"; + const WAIT_FOR_EXIT_UNSUPPORTED_ERROR: &str = "remote process exit cannot be waited on. 
\ + `--wait-for-exit` is not supported by the \ + remote process"; let post_exit_action = exit_args.post_exit_action.clone(); let validator_pid = admin_rpc_service::runtime().block_on(async move { let admin_client = admin_rpc_service::connect(ledger_path).await?; @@ -232,7 +257,7 @@ mod tests { .parse() .expect("invalid DEFAULT_MAX_DELINQUENT_STAKE"), force: false, - post_exit_action: None, + post_exit_action: Some(PostExitAction::Wait), skip_new_snapshot_check: false, skip_health_check: false, } @@ -260,13 +285,22 @@ mod tests { fn verify_args_struct_by_command_exit_with_post_exit_action() { verify_args_struct_by_command( command(), - vec![COMMAND, "--monitor"], + vec![COMMAND, "--monitor", "--no-wait-for-exit"], ExitArgs { post_exit_action: Some(PostExitAction::Monitor), ..ExitArgs::default() }, ); + verify_args_struct_by_command( + command(), + vec![COMMAND, "--no-wait-for-exit"], + ExitArgs { + post_exit_action: None, + ..ExitArgs::default() + }, + ); + verify_args_struct_by_command( command(), vec![COMMAND, "--wait-for-exit"], diff --git a/validator/src/commands/manage_block_production/mod.rs b/validator/src/commands/manage_block_production/mod.rs new file mode 100644 index 00000000000000..be3a23898d2a05 --- /dev/null +++ b/validator/src/commands/manage_block_production/mod.rs @@ -0,0 +1,141 @@ +use { + crate::{ + admin_rpc_service, + commands::{FromClapArgMatches, Result}, + }, + clap::{value_t, App, Arg, ArgMatches, SubCommand}, + solana_core::{ + banking_stage::BankingStage, + validator::{BlockProductionMethod, TransactionStructure}, + }, + std::{num::NonZeroUsize, path::Path}, +}; + +const COMMAND: &str = "manage-block-production"; + +#[derive(Debug, PartialEq)] +pub struct ManageBlockProductionArgs { + pub block_production_method: BlockProductionMethod, + pub transaction_structure: TransactionStructure, + pub num_workers: NonZeroUsize, +} + +impl FromClapArgMatches for ManageBlockProductionArgs { + fn from_clap_arg_match(matches: &ArgMatches) -> Result { 
+ Ok(ManageBlockProductionArgs { + block_production_method: value_t!( + matches, + "block_production_method", + BlockProductionMethod + ) + .unwrap_or_default(), + transaction_structure: value_t!(matches, "transaction_struct", TransactionStructure) + .unwrap_or_default(), + num_workers: value_t!(matches, "block_production_num_workers", NonZeroUsize) + .unwrap_or(BankingStage::default_num_workers()), + }) + } +} + +pub fn command<'a>() -> App<'a, 'a> { + SubCommand::with_name(COMMAND) + .about("Manage block production") + .arg( + Arg::with_name("block_production_method") + .long("block-production-method") + .alias("method") + .value_name("METHOD") + .takes_value(true) + .possible_values(BlockProductionMethod::cli_names()) + .default_value(BlockProductionMethod::default().into()) + .help(BlockProductionMethod::cli_message()), + ) + .arg( + Arg::with_name("transaction_struct") + .long("transaction-structure") + .alias("struct") + .value_name("STRUCT") + .takes_value(true) + .possible_values(TransactionStructure::cli_names()) + .default_value(TransactionStructure::default().into()) + .help(TransactionStructure::cli_message()), + ) + .arg( + Arg::with_name("block_production_num_workers") + .long("block-production-num-workers") + .alias("num-workers") + .value_name("NUM") + .takes_value(true) + .help("Number of worker threads to use for block production"), + ) +} + +pub fn execute(matches: &ArgMatches, ledger_path: &Path) -> Result<()> { + let manage_block_production_args = ManageBlockProductionArgs::from_clap_arg_match(matches)?; + + println!( + "Respawning block-production threads with method: {}, transaction structure: {} \ + num_workers: {}", + manage_block_production_args.block_production_method, + manage_block_production_args.transaction_structure, + manage_block_production_args.num_workers, + ); + let admin_client = admin_rpc_service::connect(ledger_path); + admin_rpc_service::runtime().block_on(async move { + admin_client + .await? 
+ .manage_block_production( + manage_block_production_args.block_production_method, + manage_block_production_args.transaction_structure, + manage_block_production_args.num_workers, + ) + .await + })?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn verify_args_struct_by_command_manage_block_production_default() { + let app = command(); + let matches = app.get_matches_from(vec![COMMAND]); + let args = ManageBlockProductionArgs::from_clap_arg_match(&matches).unwrap(); + + assert_eq!( + args, + ManageBlockProductionArgs { + block_production_method: BlockProductionMethod::default(), + transaction_structure: TransactionStructure::default(), + num_workers: BankingStage::default_num_workers(), + } + ); + } + + #[test] + fn verify_args_struct_by_command_manage_block_production_with_args() { + let app = command(); + let matches = app.get_matches_from(vec![ + COMMAND, + "--block-production-method", + "central-scheduler", + "--transaction-structure", + "sdk", + "--block-production-num-workers", + "4", + ]); + let args = ManageBlockProductionArgs::from_clap_arg_match(&matches).unwrap(); + + assert_eq!( + args, + ManageBlockProductionArgs { + block_production_method: BlockProductionMethod::CentralScheduler, + transaction_structure: TransactionStructure::Sdk, + num_workers: NonZeroUsize::new(4).unwrap(), + } + ); + } +} diff --git a/validator/src/commands/mod.rs b/validator/src/commands/mod.rs index 6a2061b3dd79bb..60f2ef6165d562 100644 --- a/validator/src/commands/mod.rs +++ b/validator/src/commands/mod.rs @@ -1,6 +1,7 @@ pub mod authorized_voter; pub mod contact_info; pub mod exit; +pub mod manage_block_production; pub mod monitor; pub mod plugin; pub mod repair_shred_from_peer; diff --git a/validator/src/commands/repair_whitelist/mod.rs b/validator/src/commands/repair_whitelist/mod.rs index 6db54942f7fa05..c2562abb6ed9c8 100644 --- a/validator/src/commands/repair_whitelist/mod.rs +++ b/validator/src/commands/repair_whitelist/mod.rs @@ -73,14 
+73,16 @@ pub fn command<'a>() -> App<'a, 'a> { .help("Set the validator's repair protocol whitelist"), ) .after_help( - "Note: repair protocol whitelist changes only apply to the currently running validator instance", + "Note: repair protocol whitelist changes only apply to the currently running \ + validator instance", ), ) .subcommand( SubCommand::with_name("remove-all") .about("Clear the validator's repair protocol whitelist") .after_help( - "Note: repair protocol whitelist changes only apply to the currently running validator instance", + "Note: repair protocol whitelist changes only apply to the currently running \ + validator instance", ), ) } diff --git a/validator/src/commands/run/args.rs b/validator/src/commands/run/args.rs index 3f96f9845e99dd..da9994777a7a25 100644 --- a/validator/src/commands/run/args.rs +++ b/validator/src/commands/run/args.rs @@ -5,6 +5,7 @@ use { commands::{FromClapArgMatches, Result}, }, clap::{values_t, App, Arg, ArgMatches}, + solana_accounts_db::utils::create_and_canonicalize_directory, solana_clap_utils::{ hidden_unless_forced, input_parsers::keypair_of, @@ -23,31 +24,60 @@ use { solana_keypair::Keypair, solana_ledger::{blockstore_options::BlockstoreOptions, use_snapshot_archives_at_startup}, solana_pubkey::Pubkey, + solana_rpc::{rpc::JsonRpcConfig, rpc_pubsub_service::PubSubConfig}, solana_runtime::snapshot_utils::{SnapshotVersion, SUPPORTED_ARCHIVE_COMPRESSION}, solana_send_transaction_service::send_transaction_service::{ - MAX_BATCH_SEND_RATE_MS, MAX_TRANSACTION_BATCH_SIZE, + Config as SendTransactionServiceConfig, MAX_BATCH_SEND_RATE_MS, MAX_TRANSACTION_BATCH_SIZE, }, solana_signer::Signer, solana_streamer::socket::SocketAddrSpace, solana_unified_scheduler_pool::DefaultSchedulerPool, - std::{collections::HashSet, net::SocketAddr, str::FromStr}, + std::{collections::HashSet, net::SocketAddr, path::PathBuf, str::FromStr}, }; const EXCLUDE_KEY: &str = "account-index-exclude-key"; const INCLUDE_KEY: &str = 
"account-index-include-key"; +// Declared out of line to allow use of #[rustfmt::skip] +#[rustfmt::skip] +const WEN_RESTART_HELP: &str = + "Only used during coordinated cluster restarts.\n\n\ + Need to also specify the leader's pubkey in --wen-restart-leader.\n\n\ + When specified, the validator will enter Wen Restart mode which pauses normal activity. \ + Validators in this mode will gossip their last vote to reach consensus on a safe restart \ + slot and repair all blocks on the selected fork. The safe slot will be a descendant of the \ + latest optimistically confirmed slot to ensure we do not roll back any optimistically \ + confirmed slots.\n\n\ + The progress in this mode will be saved in the file location provided. If consensus is \ + reached, the validator will automatically exit with 200 status code. Then the operators are \ + expected to restart the validator with --wait_for_supermajority and other arguments \ + (including new shred_version, supermajority slot, and bankhash) given in the error log \ + before the exit so the cluster will resume execution. 
The progress file will be kept around \ + for future debugging.\n\n\ + If wen_restart fails, refer to the progress file (in proto3 format) for further debugging and \ + watch the discord channel for instructions."; + +pub mod account_secondary_indexes; pub mod blockstore_options; +pub mod json_rpc_config; +pub mod pub_sub_config; +pub mod rpc_bigtable_config; pub mod rpc_bootstrap_config; +pub mod send_transaction_config; #[derive(Debug, PartialEq)] pub struct RunArgs { pub identity_keypair: Keypair, + pub ledger_path: PathBuf, pub logfile: String, pub entrypoints: Vec, pub known_validators: Option>, pub socket_addr_space: SocketAddrSpace, pub rpc_bootstrap_config: RpcBootstrapConfig, pub blockstore_options: BlockstoreOptions, + pub json_rpc_config: JsonRpcConfig, + pub pub_sub_config: PubSubConfig, + pub send_transaction_service_config: SendTransactionServiceConfig, } impl FromClapArgMatches for RunArgs { @@ -58,6 +88,21 @@ impl FromClapArgMatches for RunArgs { clap::ErrorKind::ArgumentNotFound, ))?; + let ledger_path = PathBuf::from(matches.value_of("ledger_path").ok_or( + clap::Error::with_description( + "The --ledger argument is required", + clap::ErrorKind::ArgumentNotFound, + ), + )?); + // Canonicalize ledger path to avoid issues with symlink creation + let ledger_path = + create_and_canonicalize_directory(ledger_path.as_path()).map_err(|err| { + crate::commands::Error::Dynamic(Box::::from(format!( + "failed to create and canonicalize ledger path '{}': {err}", + ledger_path.display(), + ))) + })?; + let logfile = matches .value_of("logfile") .map(|s| s.into()) @@ -89,19 +134,24 @@ impl FromClapArgMatches for RunArgs { Ok(RunArgs { identity_keypair, + ledger_path, logfile, entrypoints, known_validators, socket_addr_space, rpc_bootstrap_config: RpcBootstrapConfig::from_clap_arg_match(matches)?, blockstore_options: BlockstoreOptions::from_clap_arg_match(matches)?, + json_rpc_config: JsonRpcConfig::from_clap_arg_match(matches)?, + pub_sub_config: 
PubSubConfig::from_clap_arg_match(matches)?, + send_transaction_service_config: SendTransactionServiceConfig::from_clap_arg_match( + matches, + )?, }) } } pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, 'a> { - app - .arg( + app.arg( Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name) .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), @@ -124,8 +174,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .requires("vote_account") .multiple(true) .help( - "Include an additional authorized voter keypair. May be specified multiple \ - times. [default: the --identity keypair]", + "Include an additional authorized voter keypair. May be specified multiple times. \ + [default: the --identity keypair]", ), ) .arg( @@ -136,9 +186,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(is_pubkey_or_keypair) .requires("identity") .help( - "Validator vote account public key. If unspecified, voting will be disabled. \ - The authorized voter for the account must either be the --identity keypair \ - or set by the --authorized-voter argument", + "Validator vote account public key. If unspecified, voting will be disabled. 
The \ + authorized voter for the account must either be the --identity keypair or set by \ + the --authorized-voter argument", ), ) .arg( @@ -147,8 +197,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .value_name("FILE") .takes_value(true) .help( - "Create this file if it doesn't already exist once validator initialization \ - is complete", + "Create this file if it doesn't already exist once validator initialization is \ + complete", ), ) .arg( @@ -176,8 +226,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .long("no-snapshot-fetch") .takes_value(false) .help( - "Do not attempt to fetch a snapshot from the cluster, start from a local \ - snapshot if present", + "Do not attempt to fetch a snapshot from the cluster, start from a local snapshot \ + if present", ), ) .arg( @@ -209,10 +259,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .long("restricted-repair-only-mode") .takes_value(false) .help( - "Do not publish the Gossip, TPU, TVU or Repair Service ports. Doing so causes \ - the node to operate in a limited capacity that reduces its exposure to the \ - rest of the cluster. The --no-voting flag is implicit when this flag is \ - enabled", + "Do not publish the Gossip, TPU, TVU or Repair Service ports. Doing so causes the \ + node to operate in a limited capacity that reduces its exposure to the rest of \ + the cluster. 
The --no-voting flag is implicit when this flag is enabled", ), ) .arg( @@ -293,8 +342,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .default_value(&default_args.rpc_max_multiple_accounts) .help( - "Override the default maximum accounts accepted by the getMultipleAccounts \ - JSON RPC method", + "Override the default maximum accounts accepted by the getMultipleAccounts JSON \ + RPC method", ), ) .arg( @@ -304,18 +353,16 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .default_value(&default_args.health_check_slot_distance) .help( - "Report this validator as healthy if its latest replayed optimistically \ - confirmed slot is within the specified number of slots from the cluster's \ - latest optimistically confirmed slot", + "Report this validator as healthy if its latest replayed optimistically confirmed \ + slot is within the specified number of slots from the cluster's latest \ + optimistically confirmed slot", ), ) .arg( Arg::with_name("skip_preflight_health_check") .long("skip-preflight-health-check") .takes_value(false) - .help( - "Skip health check when running a preflight check", - ), + .help("Skip health check when running a preflight check"), ) .arg( Arg::with_name("rpc_faucet_addr") @@ -332,9 +379,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .multiple(true) .help( - "Comma separated persistent accounts location. \ - May be specified multiple times. \ - [default: /accounts]", + "Comma separated persistent accounts location. May be specified multiple times. \ + [default: /accounts]", ), ) .arg( @@ -352,14 +398,12 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .help("Use DIR as the base location for snapshots.") .long_help( - "Use DIR as the base location for snapshots. 
\ - Snapshot archives will use DIR unless --full-snapshot-archive-path or \ - --incremental-snapshot-archive-path is specified. \ - Additionally, a subdirectory named \"snapshots\" will be created in DIR. \ - This subdirectory holds internal files/data that are used when generating \ - snapshot archives. \ - [default: --ledger value]", - ), + "Use DIR as the base location for snapshots. Snapshot archives will use DIR \ + unless --full-snapshot-archive-path or --incremental-snapshot-archive-path is \ + specified. Additionally, a subdirectory named \"snapshots\" will be created in \ + DIR. This subdirectory holds internal files/data that are used when generating \ + snapshot archives. [default: --ledger value]", + ), ) .arg( Arg::with_name(use_snapshot_archives_at_startup::cli::NAME) @@ -375,10 +419,7 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .long("full-snapshot-archive-path") .value_name("DIR") .takes_value(true) - .help( - "Use DIR as full snapshot archives location \ - [default: --snapshots value]", - ), + .help("Use DIR as full snapshot archives location [default: --snapshots value]"), ) .arg( Arg::with_name("incremental_snapshot_archive_path") @@ -386,10 +427,7 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .conflicts_with("no-incremental-snapshots") .value_name("DIR") .takes_value(true) - .help( - "Use DIR as incremental snapshot archives location \ - [default: --snapshots value]", - ), + .help("Use DIR as incremental snapshot archives location [default: --snapshots value]"), ) .arg( Arg::with_name("tower") @@ -405,15 +443,6 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .help("Gossip port number for the validator"), ) - .arg( - Arg::with_name("gossip_host") - .long("gossip-host") - .value_name("HOST") - .takes_value(true) - .validator(solana_net_utils::is_host) - .hidden(hidden_unless_forced()) - .help("DEPRECATED: Use 
--bind-address instead."), - ) .arg( Arg::with_name("public_tpu_addr") .long("public-tpu-address") @@ -422,8 +451,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .validator(solana_net_utils::is_host_port) .help( - "Specify TPU address to advertise in gossip \ - [default: ask --entrypoint or localhost when --entrypoint is not provided]", + "Specify TPU address to advertise in gossip [default: ask --entrypoint or \ + localhost when --entrypoint is not provided]", ), ) .arg( @@ -433,8 +462,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .validator(solana_net_utils::is_host_port) .help( - "Specify TPU Forwards address to advertise in gossip [default: ask \ - --entrypoint or localhostwhen --entrypoint is not provided]", + "Specify TPU Forwards address to advertise in gossip [default: ask --entrypoint \ + or localhostwhen --entrypoint is not provided]", ), ) .arg( @@ -444,7 +473,10 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .hidden(hidden_unless_forced()) .validator(solana_net_utils::is_host_port) - .help("TPU Vortexor Receiver address to which verified transaction packet will be forwarded."), + .help( + "TPU Vortexor Receiver address to which verified transaction packet will be \ + forwarded.", + ), ) .arg( Arg::with_name("public_rpc_addr") @@ -483,14 +515,18 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, Arg::with_name("no_snapshots") .long("no-snapshots") .takes_value(false) - .conflicts_with_all(&["no_incremental_snapshots", "snapshot_interval_slots", "full_snapshot_interval_slots"]) - .help("Disable all snapshot generation") + .conflicts_with_all(&[ + "no_incremental_snapshots", + "snapshot_interval_slots", + "full_snapshot_interval_slots", + ]) + .help("Disable all snapshot generation"), ) .arg( Arg::with_name("no_incremental_snapshots") 
.long("no-incremental-snapshots") .takes_value(false) - .help("Disable incremental snapshots") + .help("Disable incremental snapshots"), ) .arg( Arg::with_name("snapshot_interval_slots") @@ -502,10 +538,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(is_non_zero) .help("Number of slots between generating snapshots") .long_help( - "Number of slots between generating snapshots. \ - If incremental snapshots are enabled, this sets the incremental snapshot interval. \ - If incremental snapshots are disabled, this sets the full snapshot interval. \ - Must be greater than zero.", + "Number of slots between generating snapshots. If incremental snapshots are \ + enabled, this sets the incremental snapshot interval. If incremental snapshots \ + are disabled, this sets the full snapshot interval. Must be greater than zero.", ), ) .arg( @@ -517,9 +552,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(is_non_zero) .help("Number of slots between generating full snapshots") .long_help( - "Number of slots between generating full snapshots. \ - Only used when incremental snapshots are enabled. \ - Must be greater than the incremental snapshot interval. \ + "Number of slots between generating full snapshots. Only used when incremental \ + snapshots are enabled. Must be greater than the incremental snapshot interval. 
\ Must be greater than zero.", ), ) @@ -532,8 +566,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .default_value(&default_args.maximum_full_snapshot_archives_to_retain) .validator(validate_maximum_full_snapshot_archives_to_retain) .help( - "The maximum number of full snapshot archives to hold on to when purging \ - older snapshots.", + "The maximum number of full snapshot archives to hold on to when purging older \ + snapshots.", ), ) .arg( @@ -544,8 +578,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .default_value(&default_args.maximum_incremental_snapshot_archives_to_retain) .validator(validate_maximum_incremental_snapshot_archives_to_retain) .help( - "The maximum number of incremental snapshot archives to hold on to when \ - purging older snapshots.", + "The maximum number of incremental snapshot archives to hold on to when purging \ + older snapshots.", ), ) .arg( @@ -556,8 +590,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(solana_perf::thread::is_niceness_adjustment_valid) .default_value(&default_args.snapshot_packager_niceness_adjustment) .help( - "Add this value to niceness of snapshot packager thread. Negative value \ - increases priority, positive value decreases priority.", + "Add this value to niceness of snapshot packager thread. Negative value increases \ + priority, positive value decreases priority.", ), ) .arg( @@ -567,9 +601,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .default_value(&default_args.min_snapshot_download_speed) .help( - "The minimal speed of snapshot downloads measured in bytes/second. If the \ - initial download speed falls below this threshold, the system will retry the \ - download against a different rpc node.", + "The minimal speed of snapshot downloads measured in bytes/second. 
If the initial \ + download speed falls below this threshold, the system will retry the download \ + against a different rpc node.", ), ) .arg( @@ -579,8 +613,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .default_value(&default_args.max_snapshot_download_abort) .help( - "The maximum number of times to abort and retry when encountering a slow \ - snapshot download.", + "The maximum number of times to abort and retry when encountering a slow snapshot \ + download.", ), ) .arg( @@ -654,9 +688,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .possible_values(&["level"]) .default_value(&default_args.rocksdb_shred_compaction) .help( - "Controls how RocksDB compacts shreds. *WARNING*: You will lose your \ - Blockstore data when you switch between options. Possible values are: \ - 'level': stores shreds using RocksDB's default (level) compaction.", + "Controls how RocksDB compacts shreds. *WARNING*: You will lose your Blockstore \ + data when you switch between options.", ), ) .arg( @@ -681,8 +714,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(is_parsable::) .default_value(&default_args.rocksdb_perf_sample_interval) .help( - "Controls how often RocksDB read/write performance samples are collected. \ - Perf samples are collected in 1 / ROCKS_PERF_SAMPLE_INTERVAL sampling rate.", + "Controls how often RocksDB read/write performance samples are collected. Perf \ + samples are collected in 1 / ROCKS_PERF_SAMPLE_INTERVAL sampling rate.", ), ) .arg( @@ -756,8 +789,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .long("no-wait-for-vote-to-start-leader") .help( "If the validator starts up with no ledger, it will wait to start block \ - production until it sees a vote land in a rooted slot. This prevents \ - double signing. 
Turn off to risk double signing a block.", + production until it sees a vote land in a rooted slot. This prevents double \ + signing. Turn off to risk double signing a block.", ), ) .arg( @@ -778,9 +811,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .multiple(true) .takes_value(true) .help( - "A snapshot hash must be published in gossip by this validator to be \ - accepted. May be specified multiple times. If unspecified any snapshot hash \ - will be accepted", + "A snapshot hash must be published in gossip by this validator to be accepted. \ + May be specified multiple times. If unspecified any snapshot hash will be \ + accepted", ), ) .arg( @@ -821,9 +854,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .multiple(true) .takes_value(true) .help( - "A list of validators to prioritize repairs from. If specified, repair \ - requests from validators in the list will be prioritized over requests from \ - other validators. [default: all validators]", + "A list of validators to prioritize repairs from. If specified, repair requests \ + from validators in the list will be prioritized over requests from other \ + validators. [default: all validators]", ), ) .arg( @@ -834,8 +867,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .multiple(true) .takes_value(true) .help( - "A list of validators to gossip with. If specified, gossip will not \ - push/pull from from validators outside this set. [default: all validators]", + "A list of validators to gossip with. If specified, gossip will not push/pull \ + from from validators outside this set. 
[default: all validators]", ), ) .arg( @@ -846,20 +879,6 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(is_parsable::) .help("Milliseconds to wait in the TPU receiver for packet coalescing."), ) - .arg( - Arg::with_name("tpu_disable_quic") - .long("tpu-disable-quic") - .takes_value(false) - .hidden(hidden_unless_forced()) - .help("DEPRECATED (UDP support will be dropped): Do not use QUIC to send transactions."), - ) - .arg( - Arg::with_name("tpu_enable_udp") - .long("tpu-enable-udp") - .takes_value(false) - .hidden(hidden_unless_forced()) - .help("DEPRECATED (UDP support will be dropped): Enable UDP for receiving/sending transactions."), - ) .arg( Arg::with_name("tpu_connection_pool_size") .long("tpu-connection-pool-size") @@ -946,9 +965,11 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .default_value(&default_args.num_quic_endpoints) .validator(is_parsable::) .hidden(hidden_unless_forced()) - .help("The number of QUIC endpoints used for TPU and TPU-Forward. It can be increased to \ - increase network ingest throughput, at the expense of higher CPU and general \ - validator load."), + .help( + "The number of QUIC endpoints used for TPU and TPU-Forward. It can be increased \ + to increase network ingest throughput, at the expense of higher CPU and general \ + validator load.", + ), ) .arg( Arg::with_name("staked_nodes_overrides") @@ -957,10 +978,10 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .help( "Provide path to a yaml file with custom overrides for stakes of specific \ - identities. Overriding the amount of stake this validator considers as valid \ - for other peers in network. The stake amount is used for calculating the \ - number of QUIC streams permitted from the peer and vote packet sender stage. \ - Format of the file: `staked_map_id: {: }", + identities. 
Overriding the amount of stake this validator considers as valid for \ + other peers in network. The stake amount is used for calculating the number of \ + QUIC streams permitted from the peer and vote packet sender stage. Format of the \ + file: `staked_map_id: {: }", ), ) .arg( @@ -971,8 +992,11 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(solana_net_utils::is_host) .default_value(&default_args.bind_address) .multiple(true) - .help("Repeatable. IP addresses to bind the validator ports on. First is primary (used on startup), the rest may be switched to during operation."), - ) + .help( + "Repeatable. IP addresses to bind the validator ports on. First is primary (used \ + on startup), the rest may be switched to during operation.", + ), + ) .arg( Arg::with_name("rpc_bind_address") .long("rpc-bind-address") @@ -980,8 +1004,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .validator(solana_net_utils::is_host) .help( - "IP address to bind the RPC port [default: 127.0.0.1 if --private-rpc is \ - present, otherwise use --bind-address]", + "IP address to bind the RPC port [default: 127.0.0.1 if --private-rpc is present, \ + otherwise use --bind-address]", ), ) .arg( @@ -1012,7 +1036,10 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, }) .takes_value(true) .default_value(&default_args.rpc_blocking_threads) - .help("Number of blocking threads to use for servicing CPU bound RPC requests (eg getMultipleAccounts)"), + .help( + "Number of blocking threads to use for servicing CPU bound RPC requests (eg \ + getMultipleAccounts)", + ), ) .arg( Arg::with_name("rpc_niceness_adj") @@ -1022,8 +1049,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(solana_perf::thread::is_niceness_adjustment_valid) .default_value(&default_args.rpc_niceness_adjustment) .help( - "Add this value to niceness of RPC threads. 
Negative value increases \ - priority, positive value decreases priority.", + "Add this value to niceness of RPC threads. Negative value increases priority, \ + positive value decreases priority.", ), ) .arg( @@ -1060,81 +1087,6 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .default_value(&default_args.rpc_bigtable_max_message_size) .help("Max encoding and decoding message size used in Bigtable Grpc client"), ) - .arg( - Arg::with_name("rpc_pubsub_worker_threads") - .long("rpc-pubsub-worker-threads") - .takes_value(true) - .value_name("NUMBER") - .validator(is_parsable::) - .default_value(&default_args.rpc_pubsub_worker_threads) - .help("PubSub worker threads"), - ) - .arg( - Arg::with_name("rpc_pubsub_enable_block_subscription") - .long("rpc-pubsub-enable-block-subscription") - .requires("enable_rpc_transaction_history") - .takes_value(false) - .help("Enable the unstable RPC PubSub `blockSubscribe` subscription"), - ) - .arg( - Arg::with_name("rpc_pubsub_enable_vote_subscription") - .long("rpc-pubsub-enable-vote-subscription") - .takes_value(false) - .help("Enable the unstable RPC PubSub `voteSubscribe` subscription"), - ) - .arg( - Arg::with_name("rpc_pubsub_max_active_subscriptions") - .long("rpc-pubsub-max-active-subscriptions") - .takes_value(true) - .value_name("NUMBER") - .validator(is_parsable::) - .default_value(&default_args.rpc_pubsub_max_active_subscriptions) - .help( - "The maximum number of active subscriptions that RPC PubSub will accept \ - across all connections.", - ), - ) - .arg( - Arg::with_name("rpc_pubsub_queue_capacity_items") - .long("rpc-pubsub-queue-capacity-items") - .takes_value(true) - .value_name("NUMBER") - .validator(is_parsable::) - .default_value(&default_args.rpc_pubsub_queue_capacity_items) - .help( - "The maximum number of notifications that RPC PubSub will store across all \ - connections.", - ), - ) - .arg( - Arg::with_name("rpc_pubsub_queue_capacity_bytes") - 
.long("rpc-pubsub-queue-capacity-bytes") - .takes_value(true) - .value_name("BYTES") - .validator(is_parsable::) - .default_value(&default_args.rpc_pubsub_queue_capacity_bytes) - .help( - "The maximum total size of notifications that RPC PubSub will store across \ - all connections.", - ), - ) - .arg( - Arg::with_name("rpc_pubsub_notification_threads") - .long("rpc-pubsub-notification-threads") - .requires("full_rpc_api") - .takes_value(true) - .value_name("NUM_THREADS") - .validator(is_parsable::) - .default_value_if( - "full_rpc_api", - None, - &default_args.rpc_pubsub_notification_threads, - ) - .help( - "The maximum number of threads that RPC PubSub will use for generating \ - notifications. 0 will disable RPC PubSub notifications", - ), - ) .arg( Arg::with_name("rpc_send_transaction_retry_ms") .long("rpc-send-retry-ms") @@ -1216,13 +1168,15 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .multiple(true) .value_name("HOST:PORT") .validator(solana_net_utils::is_host_port) - .help("Peer(s) to broadcast transactions to instead of the current leader") + .help("Peer(s) to broadcast transactions to instead of the current leader"), ) .arg( Arg::with_name("rpc_send_transaction_also_leader") .long("rpc-send-transaction-also-leader") .requires("rpc_send_transaction_tpu_peer") - .help("With `--rpc-send-transaction-tpu-peer HOST:PORT`, also send to the current leader") + .help( + "With `--rpc-send-transaction-tpu-peer HOST:PORT`, also send to the current leader", + ), ) .arg( Arg::with_name("rpc_scan_and_fix_roots") @@ -1274,10 +1228,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .help("The compression level to use when archiving with zstd") .long_help( - "The compression level to use when archiving with zstd. \ - Higher compression levels generally produce higher \ - compression ratio at the expense of speed and memory. \ - See the zstd manpage for more information." 
+ "The compression level to use when archiving with zstd. Higher compression levels \ + generally produce higher compression ratio at the expense of speed and memory. \ + See the zstd manpage for more information.", ), ) .arg( @@ -1360,16 +1313,16 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .multiple(true) .value_name("KEY") .help( - "When account indexes are enabled, only include specific keys in the index. \ - This overrides --account-index-exclude-key.", + "When account indexes are enabled, only include specific keys in the index. This \ + overrides --account-index-exclude-key.", ), ) .arg( Arg::with_name("accounts_db_verify_refcounts") .long("accounts-db-verify-refcounts") .help( - "Debug option to scan all append vecs and verify account index refcounts \ - prior to clean", + "Debug option to scan all append vecs and verify account index refcounts prior to \ + clean", ) .hidden(hidden_unless_forced()), ) @@ -1380,12 +1333,13 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .possible_values(&["all", "only-abnormal", "only-abnormal-with-verify"]) .help( "Debug option to use different type of filtering for accounts index scan in \ - shrinking. \"all\" will scan both in-memory and on-disk accounts index, which is the default. \ - \"only-abnormal\" will scan in-memory accounts index only for abnormal entries and \ - skip scanning on-disk accounts index by assuming that on-disk accounts index contains \ - only normal accounts index entry. \"only-abnormal-with-verify\" is similar to \ - \"only-abnormal\", which will scan in-memory index for abnormal entries, but will also \ - verify that on-disk account entries are indeed normal.", + shrinking. \"all\" will scan both in-memory and on-disk accounts index, which is \ + the default. 
\"only-abnormal\" will scan in-memory accounts index only for \ + abnormal entries and skip scanning on-disk accounts index by assuming that \ + on-disk accounts index contains only normal accounts index entry. \ + \"only-abnormal-with-verify\" is similar to \"only-abnormal\", which will scan \ + in-memory index for abnormal entries, but will also verify that on-disk account \ + entries are indeed normal.", ) .hidden(hidden_unless_forced()), ) @@ -1401,7 +1355,7 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .value_name("METHOD") .takes_value(true) .possible_values(&["mmap", "file"]) - .help("Access account storages using this method") + .help("Access account storages using this method"), ) .arg( Arg::with_name("accounts_db_ancient_append_vecs") @@ -1440,26 +1394,36 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(is_parsable::) .takes_value(true) .help( - "How large the write cache for account data can become. If this is exceeded, \ - the cache is flushed more aggressively.", + "How large the write cache for account data can become. If this is exceeded, the \ + cache is flushed more aggressively.", ), ) .arg( - Arg::with_name("accounts_db_read_cache_limit_mb") - .long("accounts-db-read-cache-limit-mb") - .value_name("MAX | LOW,HIGH") + Arg::with_name("accounts_db_read_cache_limit") + .long("accounts-db-read-cache-limit") + .value_name("LOW,HIGH") .takes_value(true) - .min_values(1) + .min_values(2) .max_values(2) .multiple(false) .require_delimiter(true) - .help("How large the read cache for account data can become, in mebibytes") + .help("How large the read cache for account data can become, in bytes") .long_help( - "How large the read cache for account data can become, in mebibytes. \ - If given a single value, it will be the maximum size for the cache. \ - If given a pair of values, they will be the low and high watermarks \ - for the cache. 
When the cache exceeds the high watermark, entries will \ - be evicted until the size reaches the low watermark." + "How large the read cache for account data can become, in bytes. The values will \ + be the low and high watermarks for the cache. When the cache exceeds the high \ + watermark, entries will be evicted until the size reaches the low watermark.", + ) + .hidden(hidden_unless_forced()), + ) + .arg( + Arg::with_name("accounts_db_mark_obsolete_accounts") + .long("accounts-db-mark-obsolete-accounts") + .help("Enables experimental obsolete account tracking") + .long_help( + "Enables experimental obsolete account tracking. This feature tracks obsolete \ + accounts in the account storage entry allowing for earlier cleaning of obsolete \ + accounts in the storages and index. At this time this feature is not compatible \ + with booting from local snapshot state and must unpack from archives.", ) .hidden(hidden_unless_forced()), ) @@ -1470,8 +1434,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .validator(is_parsable::) .takes_value(true) .help( - "How large accumulated results from an accounts index scan can become. If \ - this is exceeded, the scan aborts.", + "How large accumulated results from an accounts index scan can become. If this is \ + exceeded, the scan aborts.", ), ) .arg( @@ -1489,9 +1453,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(true) .multiple(true) .help( - "Persistent accounts-index location. \ - May be specified multiple times. \ - [default: /accounts_index]", + "Persistent accounts-index location. May be specified multiple times. 
[default: \ + /accounts_index]", ), ) .arg( @@ -1501,10 +1464,9 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .value_name("BOOLEAN") .default_value(&default_args.accounts_shrink_optimize_total_space) .help( - "When this is set to true, the system will shrink the most sparse accounts \ - and when the overall shrink ratio is above the specified \ - accounts-shrink-ratio, the shrink will stop and it will skip all other less \ - sparse accounts.", + "When this is set to true, the system will shrink the most sparse accounts and \ + when the overall shrink ratio is above the specified accounts-shrink-ratio, the \ + shrink will stop and it will skip all other less sparse accounts.", ), ) .arg( @@ -1514,10 +1476,10 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .value_name("RATIO") .default_value(&default_args.accounts_shrink_ratio) .help( - "Specifies the shrink ratio for the accounts to be shrunk. The shrink ratio \ - is defined as the ratio of the bytes alive over the total bytes used. If \ - the account's shrink ratio is less than this ratio it becomes a candidate \ - for shrinking. The value must between 0. and 1.0 inclusive.", + "Specifies the shrink ratio for the accounts to be shrunk. The shrink ratio is \ + defined as the ratio of the bytes alive over the total bytes used. If the \ + account's shrink ratio is less than this ratio it becomes a candidate for \ + shrinking. The value must between 0. and 1.0 inclusive.", ), ) .arg( @@ -1551,9 +1513,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .default_value(&default_args.banking_trace_dir_byte_limit) .help( "Enables the banking trace explicitly, which is enabled by default and writes \ - trace files for simulate-leader-blocks, retaining up to the default or \ - specified total bytes in the ledger. 
This flag can be used to override its \ - byte limit.", + trace files for simulate-leader-blocks, retaining up to the default or specified \ + total bytes in the ledger. This flag can be used to override its byte limit.", ), ) .arg( @@ -1570,11 +1531,11 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .takes_value(false) .help( "Delay leader block creation while replaying a block which descends from the \ - current fork and has a lower slot than our next leader slot. If we don't \ - delay here, our new leader block will be on a different fork from the \ - block we are replaying and there is a high chance that the cluster will \ - confirm that block's fork rather than our leader block's fork because it \ - was created before we started creating ours.", + current fork and has a lower slot than our next leader slot. If we don't delay \ + here, our new leader block will be on a different fork from the block we are \ + replaying and there is a high chance that the cluster will confirm that block's \ + fork rather than our leader block's fork because it was created before we \ + started creating ours.", ), ) .arg( @@ -1621,29 +1582,7 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .required(false) .conflicts_with("wait_for_supermajority") .requires("wen_restart_coordinator") - .help( - "Only used during coordinated cluster restarts.\ - \n\n\ - Need to also specify the leader's pubkey in --wen-restart-leader.\ - \n\n\ - When specified, the validator will enter Wen Restart mode which \ - pauses normal activity. Validators in this mode will gossip their last \ - vote to reach consensus on a safe restart slot and repair all blocks \ - on the selected fork. The safe slot will be a descendant of the latest \ - optimistically confirmed slot to ensure we do not roll back any \ - optimistically confirmed slots. \ - \n\n\ - The progress in this mode will be saved in the file location provided. 
\ - If consensus is reached, the validator will automatically exit with 200 \ - status code. Then the operators are expected to restart the validator \ - with --wait_for_supermajority and other arguments (including new shred_version, \ - supermajority slot, and bankhash) given in the error log before the exit so \ - the cluster will resume execution. The progress file will be kept around \ - for future debugging. \ - \n\n\ - If wen_restart fails, refer to the progress file (in proto3 format) for \ - further debugging and watch the discord channel for instructions.", - ), + .help(WEN_RESTART_HELP), ) .arg( Arg::with_name("wen_restart_coordinator") @@ -1654,8 +1593,8 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .required(false) .requires("wen_restart") .help( - "Specifies the pubkey of the leader used in wen restart. \ - May get stuck if the leader used is different from others.", + "Specifies the pubkey of the leader used in wen restart. May get stuck if the \ + leader used is different from others.", ), ) .arg( @@ -1691,10 +1630,11 @@ pub fn add_args<'a>(app: App<'a, 'a>, default_args: &'a DefaultArgs) -> App<'a, .long("use-connection-cache") .takes_value(false) .help( - "Use connection-cache crate to send transactions over TPU ports. If not set,\ - tpu-client-next is used by default.", + "Use connection-cache crate to send transactions over TPU ports. 
If not \ + set,tpu-client-next is used by default.", ), ) + .args(&pub_sub_config::args()) } fn validators_set( @@ -1711,8 +1651,7 @@ fn validators_set( if validators_set.contains(identity_pubkey) { return Err(crate::commands::Error::Dynamic( Box::::from(format!( - "the validator's identity pubkey cannot be a {arg_name}: {}", - identity_pubkey + "the validator's identity pubkey cannot be a {arg_name}: {identity_pubkey}" )), )); } @@ -1728,24 +1667,48 @@ mod tests { use { super::*, crate::cli::thread_args::thread_args, - std::net::{IpAddr, Ipv4Addr}, + scopeguard::defer, + solana_rpc::rpc::MAX_REQUEST_BODY_SIZE, + std::{ + fs, + net::{IpAddr, Ipv4Addr}, + path::{absolute, PathBuf}, + }, }; impl Default for RunArgs { fn default() -> Self { let identity_keypair = Keypair::new(); + let ledger_path = absolute(PathBuf::from("ledger")).unwrap(); let logfile = format!("agave-validator-{}.log", identity_keypair.pubkey()); let entrypoints = vec![]; let known_validators = None; RunArgs { identity_keypair, + ledger_path, logfile, entrypoints, known_validators, socket_addr_space: SocketAddrSpace::Global, rpc_bootstrap_config: RpcBootstrapConfig::default(), blockstore_options: BlockstoreOptions::default(), + json_rpc_config: JsonRpcConfig { + health_check_slot_distance: 128, + max_multiple_accounts: Some(100), + rpc_threads: num_cpus::get(), + rpc_blocking_threads: 1.max(num_cpus::get() / 4), + max_request_body_size: Some(MAX_REQUEST_BODY_SIZE), + ..JsonRpcConfig::default() + }, + pub_sub_config: PubSubConfig { + worker_threads: 4, + notification_threads: None, + queue_capacity_items: + solana_rpc::rpc_pubsub_service::DEFAULT_QUEUE_CAPACITY_ITEMS, + ..PubSubConfig::default_for_tests() + }, + send_transaction_service_config: SendTransactionServiceConfig::default(), } } } @@ -1758,8 +1721,12 @@ mod tests { entrypoints: self.entrypoints.clone(), known_validators: self.known_validators.clone(), socket_addr_space: self.socket_addr_space, + ledger_path: self.ledger_path.clone(), 
rpc_bootstrap_config: self.rpc_bootstrap_config.clone(), blockstore_options: self.blockstore_options.clone(), + json_rpc_config: self.json_rpc_config.clone(), + pub_sub_config: self.pub_sub_config.clone(), + send_transaction_service_config: self.send_transaction_service_config.clone(), } } } @@ -1857,6 +1824,92 @@ mod tests { ); } + #[test] + fn verify_args_struct_by_command_run_with_ledger_path() { + // nonexistent absolute ledger path + { + let default_run_args = RunArgs::default(); + let tmp_dir = fs::canonicalize(tempfile::tempdir().unwrap()).unwrap(); + let ledger_path = tmp_dir.join("nonexistent_ledger_path"); + assert!(!fs::exists(&ledger_path).unwrap()); + + let expected_args = RunArgs { + ledger_path: ledger_path.clone(), + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--ledger", ledger_path.to_str().unwrap()], + expected_args, + ); + assert!(fs::exists(&ledger_path).unwrap()); + } + + // existing absolute ledger path + { + let default_run_args = RunArgs::default(); + let tmp_dir = tempfile::tempdir().unwrap(); + let ledger_path = tmp_dir.path().join("existing_ledger_path"); + fs::create_dir_all(&ledger_path).unwrap(); + let ledger_path = fs::canonicalize(ledger_path).unwrap(); + assert!(fs::exists(ledger_path.as_path()).unwrap()); + + let expected_args = RunArgs { + ledger_path: ledger_path.clone(), + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--ledger", ledger_path.to_str().unwrap()], + expected_args, + ); + assert!(fs::exists(&ledger_path).unwrap()); + } + + // nonexistent relative ledger path + { + let default_run_args = RunArgs::default(); + let ledger_path = PathBuf::from("nonexistent_ledger_path"); + assert!(!fs::exists(&ledger_path).unwrap()); + defer! 
{ + fs::remove_dir_all(&ledger_path).unwrap() + }; + + let expected_args = RunArgs { + ledger_path: absolute(&ledger_path).unwrap(), + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--ledger", ledger_path.to_str().unwrap()], + expected_args, + ); + assert!(fs::exists(&ledger_path).unwrap()); + } + + // existing relative ledger path + { + let default_run_args = RunArgs::default(); + let ledger_path = PathBuf::from("existing_ledger_path"); + fs::create_dir_all(&ledger_path).unwrap(); + assert!(fs::exists(&ledger_path).unwrap()); + defer! { + fs::remove_dir_all(&ledger_path).unwrap() + }; + + let expected_args = RunArgs { + ledger_path: absolute(&ledger_path).unwrap(), + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--ledger", ledger_path.to_str().unwrap()], + expected_args, + ); + assert!(fs::exists(&ledger_path).unwrap()); + } + } + #[test] fn verify_args_struct_by_command_run_with_log() { let default_run_args = RunArgs::default(); diff --git a/validator/src/commands/run/args/account_secondary_indexes.rs b/validator/src/commands/run/args/account_secondary_indexes.rs new file mode 100644 index 00000000000000..b2c0d9df45bbe8 --- /dev/null +++ b/validator/src/commands/run/args/account_secondary_indexes.rs @@ -0,0 +1,270 @@ +use { + crate::commands::{FromClapArgMatches, Result}, + clap::{values_t, ArgMatches}, + solana_accounts_db::accounts_index::{ + AccountIndex, AccountSecondaryIndexes, AccountSecondaryIndexesIncludeExclude, + }, + solana_pubkey::Pubkey, + std::collections::HashSet, +}; + +impl FromClapArgMatches for AccountSecondaryIndexes { + fn from_clap_arg_match(matches: &ArgMatches) -> Result { + let account_indexes: HashSet = matches + .values_of("account_indexes") + .unwrap_or_default() + .map(|value| match value { + "program-id" => AccountIndex::ProgramId, + "spl-token-mint" => AccountIndex::SplTokenMint, + 
"spl-token-owner" => AccountIndex::SplTokenOwner, + _ => unreachable!(), + }) + .collect(); + + let account_indexes_include_keys: HashSet = + values_t!(matches, "account_index_include_key", Pubkey) + .unwrap_or_default() + .iter() + .cloned() + .collect(); + + let account_indexes_exclude_keys: HashSet = + values_t!(matches, "account_index_exclude_key", Pubkey) + .unwrap_or_default() + .iter() + .cloned() + .collect(); + + let exclude_keys = !account_indexes_exclude_keys.is_empty(); + let include_keys = !account_indexes_include_keys.is_empty(); + + let keys = if !account_indexes.is_empty() && (exclude_keys || include_keys) { + let account_indexes_keys = AccountSecondaryIndexesIncludeExclude { + exclude: exclude_keys, + keys: if exclude_keys { + account_indexes_exclude_keys + } else { + account_indexes_include_keys + }, + }; + Some(account_indexes_keys) + } else { + None + }; + + Ok(AccountSecondaryIndexes { + keys, + indexes: account_indexes, + }) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::commands::run::args::{ + tests::verify_args_struct_by_command_run_with_identity_setup, RunArgs, + }, + solana_rpc::rpc::JsonRpcConfig, + test_case::test_case, + }; + + #[test_case("program-id", AccountIndex::ProgramId)] + #[test_case("spl-token-mint", AccountIndex::SplTokenMint)] + #[test_case("spl-token-owner", AccountIndex::SplTokenOwner)] + fn verify_args_struct_by_command_run_with_account_indexes( + arg_value: &str, + expected_index: AccountIndex, + ) { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + account_indexes: AccountSecondaryIndexes { + keys: None, + indexes: HashSet::from([expected_index]), + }, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--account-index", arg_value], + expected_args, + ); + } + + #[test] + fn 
verify_args_struct_by_command_run_with_account_indexes_multiple() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + account_indexes: AccountSecondaryIndexes { + keys: None, + indexes: HashSet::from([ + AccountIndex::ProgramId, + AccountIndex::SplTokenMint, + AccountIndex::SplTokenOwner, + ]), + }, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--account-index", + "program-id", + "--account-index", + "spl-token-mint", + "--account-index", + "spl-token-owner", + ], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_account_index_include_key() { + // single key + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let account_pubkey_1 = Pubkey::new_unique(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + account_indexes: AccountSecondaryIndexes { + keys: Some(AccountSecondaryIndexesIncludeExclude { + exclude: false, + keys: HashSet::from([account_pubkey_1]), + }), + indexes: HashSet::from([AccountIndex::ProgramId]), + }, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--account-index", // required by --account-index-include-key + "program-id", + "--account-index-include-key", + account_pubkey_1.to_string().as_str(), + ], + expected_args, + ); + } + + // multiple keys + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let account_pubkey_1 = Pubkey::new_unique(); + let account_pubkey_2 = Pubkey::new_unique(); + let account_pubkey_3 = Pubkey::new_unique(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + account_indexes: AccountSecondaryIndexes { + keys: Some(AccountSecondaryIndexesIncludeExclude { + exclude: 
false, + keys: HashSet::from([ + account_pubkey_1, + account_pubkey_2, + account_pubkey_3, + ]), + }), + indexes: HashSet::from([AccountIndex::ProgramId]), + }, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--account-index", // required by --account-index-include-key + "program-id", + "--account-index-include-key", + account_pubkey_1.to_string().as_str(), + "--account-index-include-key", + account_pubkey_2.to_string().as_str(), + "--account-index-include-key", + account_pubkey_3.to_string().as_str(), + ], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_account_index_exclude_key() { + // single key + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let account_pubkey_1 = Pubkey::new_unique(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + account_indexes: AccountSecondaryIndexes { + keys: Some(AccountSecondaryIndexesIncludeExclude { + exclude: true, + keys: HashSet::from([account_pubkey_1]), + }), + indexes: HashSet::from([AccountIndex::ProgramId]), + }, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--account-index", // required by --account-index-exclude-key + "program-id", + "--account-index-exclude-key", + account_pubkey_1.to_string().as_str(), + ], + expected_args, + ); + } + + // multiple keys + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let account_pubkey_1 = Pubkey::new_unique(); + let account_pubkey_2 = Pubkey::new_unique(); + let account_pubkey_3 = Pubkey::new_unique(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + account_indexes: AccountSecondaryIndexes { + keys: Some(AccountSecondaryIndexesIncludeExclude { + exclude: true, + keys: HashSet::from([ + 
account_pubkey_1, + account_pubkey_2, + account_pubkey_3, + ]), + }), + indexes: HashSet::from([AccountIndex::ProgramId]), + }, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--account-index", // required by --account-index-exclude-key + "program-id", + "--account-index-exclude-key", + account_pubkey_1.to_string().as_str(), + "--account-index-exclude-key", + account_pubkey_2.to_string().as_str(), + "--account-index-exclude-key", + account_pubkey_3.to_string().as_str(), + ], + expected_args, + ); + } + } +} diff --git a/validator/src/commands/run/args/json_rpc_config.rs b/validator/src/commands/run/args/json_rpc_config.rs new file mode 100644 index 00000000000000..b924354197923e --- /dev/null +++ b/validator/src/commands/run/args/json_rpc_config.rs @@ -0,0 +1,322 @@ +use { + crate::commands::{FromClapArgMatches, Result}, + clap::{value_t, ArgMatches}, + solana_accounts_db::accounts_index::AccountSecondaryIndexes, + solana_rpc::rpc::{JsonRpcConfig, RpcBigtableConfig}, +}; + +impl FromClapArgMatches for JsonRpcConfig { + fn from_clap_arg_match(matches: &ArgMatches) -> Result { + let rpc_bigtable_config = if matches.is_present("enable_rpc_bigtable_ledger_storage") + || matches.is_present("enable_bigtable_ledger_upload") + { + Some(RpcBigtableConfig::from_clap_arg_match(matches)?) 
+ } else { + None + }; + + Ok(JsonRpcConfig { + enable_rpc_transaction_history: matches.is_present("enable_rpc_transaction_history"), + enable_extended_tx_metadata_storage: matches + .is_present("enable_extended_tx_metadata_storage"), + faucet_addr: matches + .value_of("rpc_faucet_addr") + .map(|address| { + solana_net_utils::parse_host_port(address).map_err(|err| { + crate::commands::Error::Dynamic(Box::::from( + format!("failed to parse rpc_faucet_addr: {err}"), + )) + }) + }) + .transpose()?, + health_check_slot_distance: value_t!(matches, "health_check_slot_distance", u64)?, + skip_preflight_health_check: matches.is_present("skip_preflight_health_check"), + rpc_bigtable_config, + max_multiple_accounts: Some(value_t!(matches, "rpc_max_multiple_accounts", usize)?), + account_indexes: AccountSecondaryIndexes::from_clap_arg_match(matches)?, + rpc_threads: value_t!(matches, "rpc_threads", usize)?, + rpc_blocking_threads: value_t!(matches, "rpc_blocking_threads", usize)?, + rpc_niceness_adj: value_t!(matches, "rpc_niceness_adj", i8)?, + full_api: matches.is_present("full_rpc_api"), + rpc_scan_and_fix_roots: matches.is_present("rpc_scan_and_fix_roots"), + max_request_body_size: Some(value_t!(matches, "rpc_max_request_body_size", usize)?), + disable_health_check: false, + }) + } +} + +#[cfg(test)] +mod tests { + #[cfg(not(target_os = "linux"))] + use crate::commands::run::args::tests::verify_args_struct_by_command_run_is_error_with_identity_setup; + use { + super::*, + crate::commands::run::args::{ + pub_sub_config::DEFAULT_RPC_PUBSUB_NUM_NOTIFICATION_THREADS, + tests::verify_args_struct_by_command_run_with_identity_setup, RunArgs, + }, + solana_rpc::rpc_pubsub_service::PubSubConfig, + std::{ + net::{Ipv4Addr, SocketAddr}, + num::NonZeroUsize, + }, + }; + + #[test] + fn verify_args_struct_by_command_run_with_enable_rpc_transaction_history() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: 
JsonRpcConfig { + enable_rpc_transaction_history: true, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--enable-rpc-transaction-history"], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_enable_extended_tx_metadata_storage() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + enable_rpc_transaction_history: true, + enable_extended_tx_metadata_storage: true, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--enable-rpc-transaction-history", // required by enable_extended_tx_metadata_storage + "--enable-extended-tx-metadata-storage", + ], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_faucet_addr() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + faucet_addr: Some(SocketAddr::from((Ipv4Addr::LOCALHOST, 8000))), + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-faucet-address", "127.0.0.1:8000"], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_health_check_slot_distance() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + health_check_slot_distance: 100, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--health-check-slot-distance", "100"], + expected_args, + ); + } + } + + #[test] + 
fn verify_args_struct_by_command_run_with_skip_preflight_health_check() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + skip_preflight_health_check: true, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--skip-preflight-health-check"], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_max_multiple_accounts() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + max_multiple_accounts: Some(9999), + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-max-multiple-accounts", "9999"], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_threads() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + rpc_threads: 10, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-threads", "10"], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_blocking_threads() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + rpc_blocking_threads: 999, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-blocking-threads", "999"], + expected_args, + ); + } + } + + #[cfg(target_os = "linux")] + #[test] + fn 
verify_args_struct_by_command_run_with_rpc_niceness_adj() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + rpc_niceness_adj: 10, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-niceness-adjustment", "10"], + expected_args, + ); + } + } + + #[cfg(not(target_os = "linux"))] + #[test] + fn verify_args_struct_by_command_run_with_rpc_niceness_adj() { + verify_args_struct_by_command_run_is_error_with_identity_setup( + crate::commands::run::args::RunArgs::default(), + vec!["--rpc-niceness-adjustment", "10"], + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_full_api() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + full_api: true, + ..default_run_args.json_rpc_config.clone() + }, + pub_sub_config: PubSubConfig { + notification_threads: Some( + NonZeroUsize::new( + DEFAULT_RPC_PUBSUB_NUM_NOTIFICATION_THREADS + .parse::() + .unwrap(), + ) + .unwrap(), + ), + ..default_run_args.pub_sub_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--full-rpc-api"], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_scan_and_fix_roots() { + { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + enable_rpc_transaction_history: true, + rpc_scan_and_fix_roots: true, + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--enable-rpc-transaction-history", // required by --rpc-scan-and-fix-roots + "--rpc-scan-and-fix-roots", + ], + 
expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_max_request_body_size() { + // long arg + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + max_request_body_size: Some(999), + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-max-request-body-size", "999"], + expected_args, + ); + } + } +} diff --git a/validator/src/commands/run/args/pub_sub_config.rs b/validator/src/commands/run/args/pub_sub_config.rs new file mode 100644 index 00000000000000..e04b182a587731 --- /dev/null +++ b/validator/src/commands/run/args/pub_sub_config.rs @@ -0,0 +1,256 @@ +#[cfg(test)] +use qualifier_attr::qualifiers; +use { + crate::commands::{FromClapArgMatches, Result}, + clap::{value_t, Arg, ArgMatches}, + solana_clap_utils::input_validators::is_parsable, + solana_rayon_threadlimit::get_thread_count, + solana_rpc::rpc_pubsub_service::PubSubConfig, + std::{num::NonZeroUsize, sync::LazyLock}, +}; + +static DEFAULT_RPC_PUBSUB_MAX_ACTIVE_SUBSCRIPTIONS: LazyLock = + LazyLock::new(|| PubSubConfig::default().max_active_subscriptions.to_string()); + +static DEFAULT_RPC_PUBSUB_QUEUE_CAPACITY_ITEMS: LazyLock = + LazyLock::new(|| PubSubConfig::default().queue_capacity_items.to_string()); + +static DEFAULT_RPC_PUBSUB_QUEUE_CAPACITY_BYTES: LazyLock = + LazyLock::new(|| PubSubConfig::default().queue_capacity_bytes.to_string()); + +const DEFAULT_RPC_PUBSUB_WORKER_THREADS: &str = "4"; + +#[cfg_attr(test, qualifiers(pub(crate)))] +static DEFAULT_RPC_PUBSUB_NUM_NOTIFICATION_THREADS: LazyLock = + LazyLock::new(|| get_thread_count().to_string()); + +pub(crate) fn args<'a, 'b>() -> Vec> { + vec![ + Arg::with_name("rpc_pubsub_enable_block_subscription") + .long("rpc-pubsub-enable-block-subscription") + .requires("enable_rpc_transaction_history") + .takes_value(false) + .help("Enable 
the unstable RPC PubSub `blockSubscribe` subscription"), + Arg::with_name("rpc_pubsub_enable_vote_subscription") + .long("rpc-pubsub-enable-vote-subscription") + .takes_value(false) + .help("Enable the unstable RPC PubSub `voteSubscribe` subscription"), + Arg::with_name("rpc_pubsub_max_active_subscriptions") + .long("rpc-pubsub-max-active-subscriptions") + .takes_value(true) + .value_name("NUMBER") + .validator(is_parsable::) + .default_value(&DEFAULT_RPC_PUBSUB_MAX_ACTIVE_SUBSCRIPTIONS) + .help( + "The maximum number of active subscriptions that RPC PubSub will accept across \ + all connections.", + ), + Arg::with_name("rpc_pubsub_queue_capacity_items") + .long("rpc-pubsub-queue-capacity-items") + .takes_value(true) + .value_name("NUMBER") + .validator(is_parsable::) + .default_value(&DEFAULT_RPC_PUBSUB_QUEUE_CAPACITY_ITEMS) + .help( + "The maximum number of notifications that RPC PubSub will store across all \ + connections.", + ), + Arg::with_name("rpc_pubsub_queue_capacity_bytes") + .long("rpc-pubsub-queue-capacity-bytes") + .takes_value(true) + .value_name("BYTES") + .validator(is_parsable::) + .default_value(&DEFAULT_RPC_PUBSUB_QUEUE_CAPACITY_BYTES) + .help( + "The maximum total size of notifications that RPC PubSub will store across all \ + connections.", + ), + Arg::with_name("rpc_pubsub_worker_threads") + .long("rpc-pubsub-worker-threads") + .takes_value(true) + .value_name("NUMBER") + .validator(is_parsable::) + .default_value(DEFAULT_RPC_PUBSUB_WORKER_THREADS) + .help("PubSub worker threads"), + Arg::with_name("rpc_pubsub_notification_threads") + .long("rpc-pubsub-notification-threads") + .requires("full_rpc_api") + .takes_value(true) + .value_name("NUM_THREADS") + .validator(is_parsable::) + .default_value_if( + "full_rpc_api", + None, + &DEFAULT_RPC_PUBSUB_NUM_NOTIFICATION_THREADS, + ) + .help( + "The maximum number of threads that RPC PubSub will use for generating \ + notifications. 
0 will disable RPC PubSub notifications", + ), + ] +} + +impl FromClapArgMatches for PubSubConfig { + fn from_clap_arg_match(matches: &ArgMatches) -> Result { + Ok(PubSubConfig { + enable_block_subscription: matches.is_present("rpc_pubsub_enable_block_subscription"), + enable_vote_subscription: matches.is_present("rpc_pubsub_enable_vote_subscription"), + max_active_subscriptions: value_t!( + matches, + "rpc_pubsub_max_active_subscriptions", + usize + )?, + queue_capacity_items: value_t!(matches, "rpc_pubsub_queue_capacity_items", usize)?, + queue_capacity_bytes: value_t!(matches, "rpc_pubsub_queue_capacity_bytes", usize)?, + worker_threads: value_t!(matches, "rpc_pubsub_worker_threads", usize)?, + notification_threads: value_t!(matches, "rpc_pubsub_notification_threads", usize) + .ok() + .and_then(NonZeroUsize::new), + }) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::commands::run::args::{ + tests::verify_args_struct_by_command_run_with_identity_setup, RunArgs, + }, + solana_rpc::rpc::JsonRpcConfig, + }; + + #[test] + fn verify_args_struct_by_command_run_with_enable_block_subscription() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + enable_rpc_transaction_history: true, + ..default_run_args.json_rpc_config.clone() + }, + pub_sub_config: PubSubConfig { + enable_block_subscription: true, + ..default_run_args.pub_sub_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--enable-rpc-transaction-history", // required by enable-rpc-bigtable-ledger-storage + "--rpc-pubsub-enable-block-subscription", + ], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_enable_vote_subscription() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + pub_sub_config: PubSubConfig { + 
enable_vote_subscription: true, + ..default_run_args.pub_sub_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-pubsub-enable-vote-subscription"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_max_active_subscriptions() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + pub_sub_config: PubSubConfig { + max_active_subscriptions: 1000, + ..default_run_args.pub_sub_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-pubsub-max-active-subscriptions", "1000"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_queue_capacity_items() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + pub_sub_config: PubSubConfig { + queue_capacity_items: 9999, + ..default_run_args.pub_sub_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-pubsub-queue-capacity-items", "9999"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_queue_capacity_bytes() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + pub_sub_config: PubSubConfig { + queue_capacity_bytes: 9999, + ..default_run_args.pub_sub_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-pubsub-queue-capacity-bytes", "9999"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_worker_threads() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + pub_sub_config: PubSubConfig { + worker_threads: 9999, + 
..default_run_args.pub_sub_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-pubsub-worker-threads", "9999"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_notification_threads() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + full_api: true, + ..default_run_args.json_rpc_config.clone() + }, + pub_sub_config: PubSubConfig { + notification_threads: Some(NonZeroUsize::new(9999).unwrap()), + ..default_run_args.pub_sub_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--full-rpc-api", // required by --rpc-pubsub-notification-threads + "--rpc-pubsub-notification-threads", + "9999", + ], + expected_args, + ); + } +} diff --git a/validator/src/commands/run/args/rpc_bigtable_config.rs b/validator/src/commands/run/args/rpc_bigtable_config.rs new file mode 100644 index 00000000000000..df1391bfb8ab31 --- /dev/null +++ b/validator/src/commands/run/args/rpc_bigtable_config.rs @@ -0,0 +1,193 @@ +use { + crate::commands::{FromClapArgMatches, Result}, + clap::{value_t, ArgMatches}, + solana_rpc::rpc::RpcBigtableConfig, + std::time::Duration, +}; + +impl FromClapArgMatches for RpcBigtableConfig { + fn from_clap_arg_match(matches: &ArgMatches) -> Result { + Ok(RpcBigtableConfig { + enable_bigtable_ledger_upload: matches.is_present("enable_bigtable_ledger_upload"), + bigtable_instance_name: value_t!(matches, "rpc_bigtable_instance_name", String)?, + bigtable_app_profile_id: value_t!(matches, "rpc_bigtable_app_profile_id", String)?, + timeout: value_t!(matches, "rpc_bigtable_timeout", u64) + .ok() + .map(Duration::from_secs), + max_message_size: value_t!(matches, "rpc_bigtable_max_message_size", usize)?, + }) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + 
crate::commands::run::args::{ + tests::verify_args_struct_by_command_run_with_identity_setup, RunArgs, + }, + solana_rpc::rpc::JsonRpcConfig, + }; + + fn default_rpc_bigtable_config() -> RpcBigtableConfig { + RpcBigtableConfig { + timeout: Some(Duration::from_secs(30)), + ..RpcBigtableConfig::default() + } + } + + #[test] + fn verify_args_struct_by_command_run_with_enable_rpc_bigtable_ledger_storage() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + enable_rpc_transaction_history: true, + rpc_bigtable_config: Some(RpcBigtableConfig { + ..default_rpc_bigtable_config() + }), + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--enable-rpc-transaction-history", // required by enable-rpc-bigtable-ledger-storage + "--enable-rpc-bigtable-ledger-storage", + ], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_enable_bigtable_ledger_upload() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + enable_rpc_transaction_history: true, + rpc_bigtable_config: Some(RpcBigtableConfig { + enable_bigtable_ledger_upload: true, + ..default_rpc_bigtable_config() + }), + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--enable-rpc-transaction-history", // required by enable-bigtable-ledger-upload + "--enable-bigtable-ledger-upload", + ], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_bigtable_instance_name() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + enable_rpc_transaction_history: true, + 
rpc_bigtable_config: Some(RpcBigtableConfig { + enable_bigtable_ledger_upload: true, + bigtable_instance_name: "my-custom-instance-name".to_string(), + ..default_rpc_bigtable_config() + }), + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--enable-rpc-transaction-history", // required by enable-bigtable-ledger-upload + "--enable-bigtable-ledger-upload", // required by all rpc_bigtable_config + "--rpc-bigtable-instance-name", + "my-custom-instance-name", + ], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_bigtable_app_profile_id() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + enable_rpc_transaction_history: true, + rpc_bigtable_config: Some(RpcBigtableConfig { + enable_bigtable_ledger_upload: true, + bigtable_app_profile_id: "my-custom-app-profile-id".to_string(), + ..default_rpc_bigtable_config() + }), + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--enable-rpc-transaction-history", // required by enable-bigtable-ledger-upload + "--enable-bigtable-ledger-upload", // required by all rpc_bigtable_config + "--rpc-bigtable-app-profile-id", + "my-custom-app-profile-id", + ], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_bigtable_timeout() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + enable_rpc_transaction_history: true, + rpc_bigtable_config: Some(RpcBigtableConfig { + enable_bigtable_ledger_upload: true, + timeout: Some(Duration::from_secs(99999)), + ..default_rpc_bigtable_config() + }), + ..default_run_args.json_rpc_config.clone() + }, + 
..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--enable-rpc-transaction-history", // required by enable-bigtable-ledger-upload + "--enable-bigtable-ledger-upload", // required by all rpc_bigtable_config + "--rpc-bigtable-timeout", + "99999", + ], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_bigtable_max_message_size() { + let default_run_args = crate::commands::run::args::RunArgs::default(); + let expected_args = RunArgs { + json_rpc_config: JsonRpcConfig { + enable_rpc_transaction_history: true, + rpc_bigtable_config: Some(RpcBigtableConfig { + enable_bigtable_ledger_upload: true, + max_message_size: 99999, + ..default_rpc_bigtable_config() + }), + ..default_run_args.json_rpc_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--enable-rpc-transaction-history", // required by enable-bigtable-ledger-upload + "--enable-bigtable-ledger-upload", // required by all rpc_bigtable_config + "--rpc-bigtable-max-message-size", + "99999", + ], + expected_args, + ); + } +} diff --git a/validator/src/commands/run/args/send_transaction_config.rs b/validator/src/commands/run/args/send_transaction_config.rs new file mode 100644 index 00000000000000..7e10a80f013dae --- /dev/null +++ b/validator/src/commands/run/args/send_transaction_config.rs @@ -0,0 +1,324 @@ +use { + crate::commands::{Error, FromClapArgMatches, Result}, + clap::{value_t, ArgMatches}, + solana_send_transaction_service::send_transaction_service::{ + Config as SendTransactionServiceConfig, MAX_TRANSACTION_SENDS_PER_SECOND, + }, +}; + +impl FromClapArgMatches for SendTransactionServiceConfig { + fn from_clap_arg_match(matches: &ArgMatches) -> Result { + let batch_send_rate_ms = value_t!(matches, "rpc_send_transaction_batch_ms", u64)?; + let retry_rate_ms = value_t!(matches, "rpc_send_transaction_retry_ms", u64)?; + 
if batch_send_rate_ms > retry_rate_ms { + return Err(Error::Dynamic(Box::::from(format!( + "the specified rpc-send-batch-ms ({batch_send_rate_ms}) is invalid, it must be <= \ + rpc-send-retry-ms ({retry_rate_ms})" + )))); + } + + let batch_size = value_t!(matches, "rpc_send_transaction_batch_size", usize)?; + let millis_per_second = 1000; + let tps = batch_size as u64 * millis_per_second / batch_send_rate_ms; + if tps > MAX_TRANSACTION_SENDS_PER_SECOND { + return Err(Error::Dynamic(Box::::from(format!( + "either the specified rpc-send-batch-size ({batch_size}) or rpc-send-batch-ms \ + ({batch_send_rate_ms}) is invalid, 'rpc-send-batch-size * 1000 / \ + rpc-send-batch-ms' must be smaller than ({MAX_TRANSACTION_SENDS_PER_SECOND}) .", + )))); + } + + let tpu_peers = matches + .values_of("rpc_send_transaction_tpu_peer") + .map(|values| values.map(solana_net_utils::parse_host_port).collect()) + .transpose() + .map_err(|e| { + Error::Dynamic(Box::::from(format!( + "Invalid tpu peer address: {e}", + ))) + })?; + + let default_max_retries = + value_t!(matches, "rpc_send_transaction_default_max_retries", usize).ok(); + + let service_max_retries = + value_t!(matches, "rpc_send_transaction_service_max_retries", usize)?; + + let retry_pool_max_size = + value_t!(matches, "rpc_send_transaction_retry_pool_max_size", usize)?; + + let rpc_send_transaction_also_leader = + matches.is_present("rpc_send_transaction_also_leader"); + let leader_forward_count = if tpu_peers.is_some() && !rpc_send_transaction_also_leader { + // rpc-sts is configured to send only to specific tpu peers. disable leader forwards + 0 + } else { + value_t!(matches, "rpc_send_transaction_leader_forward_count", u64)? 
+ }; + + Ok(SendTransactionServiceConfig { + retry_rate_ms, + batch_size, + batch_send_rate_ms, + default_max_retries, + service_max_retries, + retry_pool_max_size, + tpu_peers, + leader_forward_count, + }) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::commands::run::args::{ + tests::verify_args_struct_by_command_run_with_identity_setup, RunArgs, + }, + std::net::{Ipv4Addr, SocketAddr}, + }; + + #[test] + fn verify_args_struct_by_command_run_with_retry_rate_ms() { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + retry_rate_ms: 99999, + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-send-retry-ms", "99999"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_batch_size() { + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + batch_size: 1, + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-send-batch-size", "1"], + expected_args, + ); + } + + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + batch_size: 999, + batch_send_rate_ms: 1000, + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--rpc-send-batch-size", + "999", + "--rpc-send-batch-ms", + "1000", + ], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_batch_send_rate_ms() { + let default_run_args = RunArgs::default(); + let 
expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + batch_send_rate_ms: 1999, + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-send-batch-ms", "1999"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_default_max_retries() { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + default_max_retries: Some(9999), + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-send-default-max-retries", "9999"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_service_max_retries() { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + service_max_retries: 9999, + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-send-service-max-retries", "9999"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_retry_pool_max_size() { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + retry_pool_max_size: 9999, + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-send-transaction-retry-pool-max-size", "9999"], + expected_args, + ); + } + + #[test] + fn verify_args_struct_by_command_run_with_tpu_peers() { + // single tpu peer + { + let 
default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + tpu_peers: Some(vec![SocketAddr::from((Ipv4Addr::LOCALHOST, 8000))]), + leader_forward_count: 0, // see SendTransactionServiceConfig::from_clap_arg_match for more details + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-send-transaction-tpu-peer", "127.0.0.1:8000"], + expected_args, + ); + } + + // multiple tpu peers + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + tpu_peers: Some(vec![ + SocketAddr::from((Ipv4Addr::LOCALHOST, 8000)), + SocketAddr::from((Ipv4Addr::LOCALHOST, 8001)), + SocketAddr::from((Ipv4Addr::LOCALHOST, 8002)), + ]), + leader_forward_count: 0, // see SendTransactionServiceConfig::from_clap_arg_match for more details + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--rpc-send-transaction-tpu-peer", + "127.0.0.1:8000", + "--rpc-send-transaction-tpu-peer", + "127.0.0.1:8001", + "--rpc-send-transaction-tpu-peer", + "127.0.0.1:8002", + ], + expected_args, + ); + } + } + + #[test] + fn verify_args_struct_by_command_run_with_rpc_send_transaction_leader_forward_count() { + // rpc-send-transaction-leader-forward-count + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + leader_forward_count: 100, + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec!["--rpc-send-leader-count", "100"], + expected_args, + ); + } + + // 
rpc-send-transaction-leader-forward-count + rpc-send-transaction-tpu-peer + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + leader_forward_count: 0, + tpu_peers: Some(vec![SocketAddr::from((Ipv4Addr::LOCALHOST, 8000))]), + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--rpc-send-transaction-tpu-peer", + "127.0.0.1:8000", + "--rpc-send-leader-count", + "100", + ], + expected_args, + ); + } + + // rpc-send-transaction-leader-forward-count + rpc-send-transaction-also-leader + rpc-send-transaction-tpu-peer + { + let default_run_args = RunArgs::default(); + let expected_args = RunArgs { + send_transaction_service_config: SendTransactionServiceConfig { + tpu_peers: Some(vec![SocketAddr::from((Ipv4Addr::LOCALHOST, 8000))]), + leader_forward_count: 100, + ..default_run_args.send_transaction_service_config.clone() + }, + ..default_run_args.clone() + }; + verify_args_struct_by_command_run_with_identity_setup( + default_run_args, + vec![ + "--rpc-send-transaction-tpu-peer", + "127.0.0.1:8000", + "--rpc-send-transaction-also-leader", + "--rpc-send-leader-count", + "100", + ], + expected_args, + ); + } + } +} diff --git a/validator/src/commands/run/execute.rs b/validator/src/commands/run/execute.rs index a3693e820e5e69..586df119a42c37 100644 --- a/validator/src/commands/run/execute.rs +++ b/validator/src/commands/run/execute.rs @@ -11,12 +11,9 @@ use { log::*, rand::{seq::SliceRandom, thread_rng}, solana_accounts_db::{ - accounts_db::{AccountShrinkThreshold, AccountsDbConfig}, + accounts_db::{AccountShrinkThreshold, AccountsDbConfig, MarkObsoleteAccounts}, accounts_file::StorageAccess, - accounts_index::{ - AccountIndex, AccountSecondaryIndexes, AccountSecondaryIndexesIncludeExclude, - AccountsIndexConfig, IndexLimitMb, ScanFilter, - }, + 
accounts_index::{AccountSecondaryIndexes, AccountsIndexConfig, IndexLimitMb, ScanFilter}, hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, utils::{ create_all_accounts_run_and_snapshot_dirs, create_and_canonicalize_directories, @@ -40,7 +37,7 @@ use { }, }, solana_gossip::{ - cluster_info::{BindIpAddrs, NodeConfig, DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS}, + cluster_info::{NodeConfig, DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS}, contact_info::ContactInfo, node::Node, }, @@ -51,19 +48,17 @@ use { use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, }, solana_logger::redirect_stderr_to_file, + solana_net_utils::multihomed_sockets::BindIpAddrs, solana_perf::recycler::enable_recycler_warming, solana_poh::poh_service, solana_pubkey::Pubkey, - solana_rpc::{ - rpc::{JsonRpcConfig, RpcBigtableConfig}, - rpc_pubsub_service::PubSubConfig, - }, solana_runtime::{ runtime_config::RuntimeConfig, snapshot_config::{SnapshotConfig, SnapshotUsage}, - snapshot_utils::{self, ArchiveFormat, SnapshotInterval, SnapshotVersion}, + snapshot_utils::{ + self, ArchiveFormat, SnapshotInterval, SnapshotVersion, BANK_SNAPSHOTS_DIR, + }, }, - solana_send_transaction_service::send_transaction_service, solana_signer::Signer, solana_streamer::quic::{QuicServerParams, DEFAULT_TPU_COALESCE}, solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP, @@ -91,21 +86,18 @@ pub enum Operation { Run, } -const MILLIS_PER_SECOND: u64 = 1000; - pub fn execute( matches: &ArgMatches, solana_version: &str, - ledger_path: &Path, operation: Operation, ) -> Result<(), Box> { let run_args = RunArgs::from_clap_arg_match(matches)?; let cli::thread_args::NumThreadConfig { - accounts_db_clean_threads, + accounts_db_background_threads, accounts_db_foreground_threads, - accounts_db_hash_threads, accounts_index_flush_threads, + block_production_num_workers, ip_echo_server_threads, rayon_global_threads, replay_forks_threads, @@ -153,7 +145,7 @@ pub fn execute( match &staked_nodes_overrides_path { None => 
StakedNodesOverrides::default(), Some(p) => load_staked_nodes_overrides(p).unwrap_or_else(|err| { - error!("Failed to load stake-nodes-overrides from {}: {}", p, err); + error!("Failed to load stake-nodes-overrides from {p}: {err}"); clap::Error::with_description( "Failed to load configuration of stake-nodes-overrides argument", clap::ErrorKind::InvalidValue, @@ -172,13 +164,7 @@ pub fn execute( .map(Duration::from_millis) .unwrap_or(DEFAULT_TPU_COALESCE); - // Canonicalize ledger path to avoid issues with symlink creation - let ledger_path = create_and_canonicalize_directory(ledger_path).map_err(|err| { - format!( - "unable to access ledger path '{}': {err}", - ledger_path.display(), - ) - })?; + let ledger_path = run_args.ledger_path; let max_ledger_shreds = if matches.is_present("limit_ledger_size") { let limit_ledger_size = match matches.value_of("limit_ledger_size") { @@ -235,25 +221,34 @@ pub fn execute( BindIpAddrs::new(parsed).map_err(|err| format!("invalid bind_addresses: {err}"))? 
}; + if bind_addresses.len() > 1 && matches.is_present("use_connection_cache") { + Err(String::from( + "Connection cache can not be used in a multihoming context", + ))?; + } + let rpc_bind_address = if matches.is_present("rpc_bind_address") { solana_net_utils::parse_host(matches.value_of("rpc_bind_address").unwrap()) .expect("invalid rpc_bind_address") } else if private_rpc { solana_net_utils::parse_host("127.0.0.1").unwrap() } else { - bind_addresses.primary() + bind_addresses.active() }; let contact_debug_interval = value_t_or_exit!(matches, "contact_debug_interval", u64); - let account_indexes = process_account_indexes(matches); + let account_indexes = AccountSecondaryIndexes::from_clap_arg_match(matches)?; let restricted_repair_only_mode = matches.is_present("restricted_repair_only_mode"); let accounts_shrink_optimize_total_space = value_t_or_exit!(matches, "accounts_shrink_optimize_total_space", bool); let tpu_use_quic = !matches.is_present("tpu_disable_quic"); if !tpu_use_quic { - warn!("TPU QUIC was disabled via --tpu_disable_quic, this will prevent validator from receiving transactions!"); + warn!( + "TPU QUIC was disabled via --tpu_disable_quic, this will prevent validator from \ + receiving transactions!" + ); } let vote_use_quic = value_t_or_exit!(matches, "vote_use_quic", bool); @@ -291,7 +286,7 @@ pub fn execute( // version can then be deleted from gossip and get_rpc_node above. let expected_shred_version = value_t!(matches, "expected_shred_version", u16) .ok() - .or_else(|| get_cluster_shred_version(&entrypoint_addrs, bind_addresses.primary())); + .or_else(|| get_cluster_shred_version(&entrypoint_addrs, bind_addresses.active())); let tower_path = value_t!(matches, "tower", PathBuf) .ok() @@ -354,8 +349,19 @@ pub fn execute( .transpose()? 
.unzip(); - let read_cache_limit_bytes = values_of::(matches, "accounts_db_read_cache_limit_mb") - .map(|limits| { + let read_cache_limit_bytes = + values_of::(matches, "accounts_db_read_cache_limit").map(|limits| { + match limits.len() { + 2 => (limits[0], limits[1]), + _ => { + // clap will enforce two values are given + unreachable!("invalid number of values given to accounts-db-read-cache-limit") + } + } + }); + // accounts-db-read-cache-limit-mb was deprecated in v3.0.0 + let read_cache_limit_mb = + values_of::(matches, "accounts_db_read_cache_limit_mb").map(|limits| { match limits.len() { // we were given explicit low and high watermark values, so use them 2 => (limits[0] * MB, limits[1] * MB), @@ -369,6 +375,9 @@ pub fn execute( } } }); + // clap will enforce only one cli arg is provided, so pick whichever is Some + let read_cache_limit_bytes = read_cache_limit_bytes.or(read_cache_limit_mb); + let storage_access = matches .value_of("accounts_db_access_storages_method") .map(|method| match method { @@ -394,6 +403,12 @@ pub fn execute( }) .unwrap_or_default(); + let mark_obsolete_accounts = if matches.is_present("accounts_db_mark_obsolete_accounts") { + MarkObsoleteAccounts::Enabled + } else { + MarkObsoleteAccounts::Disabled + }; + let accounts_db_config = AccountsDbConfig { index: Some(accounts_index_config), account_indexes: Some(account_indexes.clone()), @@ -415,14 +430,13 @@ pub fn execute( exhaustively_verify_refcounts: matches.is_present("accounts_db_verify_refcounts"), storage_access, scan_filter_for_shrinking, - num_clean_threads: Some(accounts_db_clean_threads), + num_background_threads: Some(accounts_db_background_threads), num_foreground_threads: Some(accounts_db_foreground_threads), - num_hash_threads: Some(accounts_db_hash_threads), + mark_obsolete_accounts, + memlock_budget_size: solana_accounts_db::accounts_db::DEFAULT_MEMLOCK_BUDGET_SIZE, ..AccountsDbConfig::default() }; - let accounts_db_config = Some(accounts_db_config); - let 
on_start_geyser_plugin_config_files = if matches.is_present("geyser_plugin_config") { Some( values_t_or_exit!(matches, "geyser_plugin_config", String) @@ -436,70 +450,6 @@ pub fn execute( let starting_with_geyser_plugins: bool = on_start_geyser_plugin_config_files.is_some() || matches.is_present("geyser_plugin_always_enabled"); - let rpc_bigtable_config = if matches.is_present("enable_rpc_bigtable_ledger_storage") - || matches.is_present("enable_bigtable_ledger_upload") - { - Some(RpcBigtableConfig { - enable_bigtable_ledger_upload: matches.is_present("enable_bigtable_ledger_upload"), - bigtable_instance_name: value_t_or_exit!(matches, "rpc_bigtable_instance_name", String), - bigtable_app_profile_id: value_t_or_exit!( - matches, - "rpc_bigtable_app_profile_id", - String - ), - timeout: value_t!(matches, "rpc_bigtable_timeout", u64) - .ok() - .map(Duration::from_secs), - max_message_size: value_t_or_exit!(matches, "rpc_bigtable_max_message_size", usize), - }) - } else { - None - }; - - let rpc_send_retry_rate_ms = value_t_or_exit!(matches, "rpc_send_transaction_retry_ms", u64); - let rpc_send_batch_size = value_t_or_exit!(matches, "rpc_send_transaction_batch_size", usize); - let rpc_send_batch_send_rate_ms = - value_t_or_exit!(matches, "rpc_send_transaction_batch_ms", u64); - - if rpc_send_batch_send_rate_ms > rpc_send_retry_rate_ms { - Err(format!( - "the specified rpc-send-batch-ms ({rpc_send_batch_send_rate_ms}) is invalid, it must \ - be <= rpc-send-retry-ms ({rpc_send_retry_rate_ms})" - ))?; - } - - let tps = rpc_send_batch_size as u64 * MILLIS_PER_SECOND / rpc_send_batch_send_rate_ms; - if tps > send_transaction_service::MAX_TRANSACTION_SENDS_PER_SECOND { - Err(format!( - "either the specified rpc-send-batch-size ({}) or rpc-send-batch-ms ({}) is invalid, \ - 'rpc-send-batch-size * 1000 / rpc-send-batch-ms' must be smaller than ({}) .", - rpc_send_batch_size, - rpc_send_batch_send_rate_ms, - send_transaction_service::MAX_TRANSACTION_SENDS_PER_SECOND - ))?; - } 
- let rpc_send_transaction_tpu_peers = matches - .values_of("rpc_send_transaction_tpu_peer") - .map(|values| { - values - .map(solana_net_utils::parse_host_port) - .collect::, String>>() - }) - .transpose() - .map_err(|err| { - format!("failed to parse rpc send-transaction-service tpu peer address: {err}") - })?; - let rpc_send_transaction_also_leader = matches.is_present("rpc_send_transaction_also_leader"); - let leader_forward_count = - if rpc_send_transaction_tpu_peers.is_some() && !rpc_send_transaction_also_leader { - // rpc-sts is configured to send only to specific tpu peers. disable leader forwards - 0 - } else { - value_t_or_exit!(matches, "rpc_send_transaction_leader_forward_count", u64) - }; - - let full_api = matches.is_present("full_rpc_api"); - let xdp_interface = matches.value_of("retransmit_xdp_interface"); let xdp_zero_copy = matches.is_present("retransmit_xdp_zero_copy"); let retransmit_xdp = matches.value_of("retransmit_xdp_cpu_cores").map(|cpus| { @@ -546,6 +496,23 @@ pub fn execute( run_args.rpc_bootstrap_config.incremental_snapshot_fetch, )?; + let use_snapshot_archives_at_startup = value_t_or_exit!( + matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ); + + if mark_obsolete_accounts == MarkObsoleteAccounts::Enabled + && use_snapshot_archives_at_startup != UseSnapshotArchivesAtStartup::Always + { + Err(format!( + "The --accounts-db-mark-obsolete-accounts option requires the \ + --use-snapshot-archives-at-startup option to be set to {}. 
Current value: {}", + UseSnapshotArchivesAtStartup::Always, + use_snapshot_archives_at_startup + ))?; + } + let mut validator_config = ValidatorConfig { require_tower: matches.is_present("require_tower"), tower_storage, @@ -560,38 +527,7 @@ pub fn execute( .map(|s| Hash::from_str(s).unwrap()), expected_shred_version, new_hard_forks: hardforks_of(matches, "hard_forks"), - rpc_config: JsonRpcConfig { - enable_rpc_transaction_history: matches.is_present("enable_rpc_transaction_history"), - enable_extended_tx_metadata_storage: matches - .is_present("enable_extended_tx_metadata_storage"), - rpc_bigtable_config, - faucet_addr: matches.value_of("rpc_faucet_addr").map(|address| { - solana_net_utils::parse_host_port(address).expect("failed to parse faucet address") - }), - full_api, - max_multiple_accounts: Some(value_t_or_exit!( - matches, - "rpc_max_multiple_accounts", - usize - )), - health_check_slot_distance: value_t_or_exit!( - matches, - "health_check_slot_distance", - u64 - ), - disable_health_check: false, - rpc_threads: value_t_or_exit!(matches, "rpc_threads", usize), - rpc_blocking_threads: value_t_or_exit!(matches, "rpc_blocking_threads", usize), - rpc_niceness_adj: value_t_or_exit!(matches, "rpc_niceness_adj", i8), - account_indexes: account_indexes.clone(), - rpc_scan_and_fix_roots: matches.is_present("rpc_scan_and_fix_roots"), - max_request_body_size: Some(value_t_or_exit!( - matches, - "rpc_max_request_body_size", - usize - )), - skip_preflight_health_check: matches.is_present("skip_preflight_health_check"), - }, + rpc_config: run_args.json_rpc_config, on_start_geyser_plugin_config_files, geyser_plugin_always_enabled: matches.is_present("geyser_plugin_always_enabled"), rpc_addrs: value_t!(matches, "rpc_port", u16).ok().map(|rpc_port| { @@ -603,29 +539,7 @@ pub fn execute( // https://github.com/solana-labs/solana/issues/12250 ) }), - pubsub_config: PubSubConfig { - enable_block_subscription: matches.is_present("rpc_pubsub_enable_block_subscription"), - 
enable_vote_subscription: matches.is_present("rpc_pubsub_enable_vote_subscription"), - max_active_subscriptions: value_t_or_exit!( - matches, - "rpc_pubsub_max_active_subscriptions", - usize - ), - queue_capacity_items: value_t_or_exit!( - matches, - "rpc_pubsub_queue_capacity_items", - usize - ), - queue_capacity_bytes: value_t_or_exit!( - matches, - "rpc_pubsub_queue_capacity_bytes", - usize - ), - worker_threads: value_t_or_exit!(matches, "rpc_pubsub_worker_threads", usize), - notification_threads: value_t!(matches, "rpc_pubsub_notification_threads", usize) - .ok() - .and_then(NonZeroUsize::new), - }, + pubsub_config: run_args.pub_sub_config, voting_disabled: matches.is_present("no_voting") || restricted_repair_only_mode, wait_for_supermajority: value_t!(matches, "wait_for_supermajority", Slot).ok(), known_validators: run_args.known_validators, @@ -641,29 +555,7 @@ pub fn execute( generator_config: None, contact_debug_interval, contact_save_interval: DEFAULT_CONTACT_SAVE_INTERVAL_MILLIS, - send_transaction_service_config: send_transaction_service::Config { - retry_rate_ms: rpc_send_retry_rate_ms, - leader_forward_count, - default_max_retries: value_t!( - matches, - "rpc_send_transaction_default_max_retries", - usize - ) - .ok(), - service_max_retries: value_t_or_exit!( - matches, - "rpc_send_transaction_service_max_retries", - usize - ), - batch_send_rate_ms: rpc_send_batch_send_rate_ms, - batch_size: rpc_send_batch_size, - retry_pool_max_size: value_t_or_exit!( - matches, - "rpc_send_transaction_retry_pool_max_size", - usize - ), - tpu_peers: rpc_send_transaction_tpu_peers, - }, + send_transaction_service_config: run_args.send_transaction_service_config, no_poh_speed_test: matches.is_present("no_poh_speed_test"), no_os_memory_stats_reporting: matches.is_present("no_os_memory_stats_reporting"), no_os_network_stats_reporting: matches.is_present("no_os_network_stats_reporting"), @@ -688,11 +580,7 @@ pub fn execute( ..RuntimeConfig::default() }, 
staked_nodes_overrides: staked_nodes_overrides.clone(), - use_snapshot_archives_at_startup: value_t_or_exit!( - matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), + use_snapshot_archives_at_startup, ip_echo_server_threads, rayon_global_threads, replay_forks_threads, @@ -722,6 +610,7 @@ pub fn execute( "block_production_method", BlockProductionMethod ), + block_production_num_workers, transaction_struct: value_t_or_exit!(matches, "transaction_struct", TransactionStructure), enable_block_production_forwarding: staked_nodes_overrides_path.is_some(), banking_trace_dir_byte_limit: parse_banking_trace_dir_byte_limit(matches), @@ -773,9 +662,9 @@ pub fn execute( BlockVerificationMethod::BlockstoreProcessor => { warn!( "The value \"blockstore-processor\" for --block-verification-method has been \ - deprecated. The value \"blockstore-processor\" is still allowed for now, but \ - is planned for removal in the near future. To update, either set the value \ - \"unified-scheduler\" or remove the --block-verification-method argument" + deprecated. The value \"blockstore-processor\" is still allowed for now, but is \ + planned for removal in the near future. To update, either set the value \ + \"unified-scheduler\" or remove the --block-verification-method argument" ); } BlockVerificationMethod::UnifiedScheduler => {} @@ -794,7 +683,7 @@ pub fn execute( info!("OS network limits test passed."); } else { Err("OS network limit test failed. See \ - https://docs.solanalabs.com/operations/guides/validator-start#system-tuning" + https://docs.anza.xyz/operations/guides/validator-start#system-tuning" .to_string())?; } } @@ -830,7 +719,10 @@ pub fn execute( let gossip_host = matches .value_of("gossip_host") .map(|gossip_host| { - warn!("--gossip-host is deprecated. Use --bind-address or rely on automatic public IP discovery instead."); + warn!( + "--gossip-host is deprecated. Use --bind-address or rely on automatic public IP \ + discovery instead." 
+ ); solana_net_utils::parse_host(gossip_host) .map_err(|err| format!("failed to parse --gossip-host: {err}")) }) @@ -838,9 +730,8 @@ pub fn execute( let advertised_ip = if let Some(ip) = gossip_host { ip - } else if !bind_addresses.primary().is_unspecified() && !bind_addresses.primary().is_loopback() - { - bind_addresses.primary() + } else if !bind_addresses.active().is_unspecified() && !bind_addresses.active().is_loopback() { + bind_addresses.active() } else if !entrypoint_addrs.is_empty() { let mut order: Vec<_> = (0..entrypoint_addrs.len()).collect(); order.shuffle(&mut thread_rng()); @@ -850,12 +741,11 @@ pub fn execute( .find_map(|i| { let entrypoint_addr = &entrypoint_addrs[i]; info!( - "Contacting {} to determine the validator's public IP address", - entrypoint_addr + "Contacting {entrypoint_addr} to determine the validator's public IP address" ); solana_net_utils::get_public_ip_addr_with_binding( entrypoint_addr, - bind_addresses.primary(), + bind_addresses.active(), ) .map_or_else( |err| { @@ -870,7 +760,7 @@ pub fn execute( IpAddr::V4(Ipv4Addr::LOCALHOST) }; let gossip_port = value_t!(matches, "gossip_port", u16).or_else(|_| { - solana_net_utils::find_available_port_in_range(bind_addresses.primary(), (0, 1)) + solana_net_utils::find_available_port_in_range(bind_addresses.active(), (0, 1)) .map_err(|err| format!("unable to find an available gossip port: {err}")) })?; @@ -924,7 +814,7 @@ pub fn execute( advertised_ip, gossip_port, port_range: dynamic_port_range, - bind_ip_addrs: bind_addresses, + bind_ip_addrs: Arc::new(bind_addresses), public_tpu_addr, public_tpu_forwards_addr, num_tvu_receive_sockets: tvu_receive_threads, @@ -1087,7 +977,10 @@ pub fn execute( ) { // 200 is a special error code, see // https://github.com/solana-foundation/solana-improvement-documents/pull/46 - error!("Please remove --wen_restart and use --wait_for_supermajority as instructed above"); + error!( + "Please remove --wen_restart and use --wait_for_supermajority as instructed \ 
+ above" + ); exit(200); } Err(format!("{err:?}")) @@ -1145,10 +1038,7 @@ fn get_cluster_shred_version(entrypoints: &[SocketAddr], bind_address: IpAddr) - Err(err) => eprintln!("get_cluster_shred_version failed: {entrypoint}, {err}"), Ok(0) => eprintln!("entrypoint {entrypoint} returned shred-version zero"), Ok(shred_version) => { - info!( - "obtained shred-version {} from {}", - shred_version, entrypoint - ); + info!("obtained shred-version {shred_version} from {entrypoint}"); return Some(shred_version); } } @@ -1201,10 +1091,9 @@ fn new_snapshot_config( if matches.occurrences_of("full_snapshot_interval_slots") > 0 { warn!( "Incremental snapshots are disabled, yet \ - --full-snapshot-interval-slots was specified! \ - Note that --full-snapshot-interval-slots is *ignored* \ - when incremental snapshots are disabled. \ - Use --snapshot-interval-slots instead.", + --full-snapshot-interval-slots was specified! Note that \ + --full-snapshot-interval-slots is *ignored* when incremental \ + snapshots are disabled. Use --snapshot-interval-slots instead.", ); } ( @@ -1232,9 +1121,9 @@ fn new_snapshot_config( let full_snapshot_interval_slots = full_snapshot_interval_slots.get(); if full_snapshot_interval_slots > DEFAULT_SLOTS_PER_EPOCH { warn!( - "The full snapshot interval is excessively large: {}! This will negatively \ - impact the background cleanup tasks in accounts-db. Consider a smaller value.", - full_snapshot_interval_slots, + "The full snapshot interval is excessively large: {full_snapshot_interval_slots}! \ + This will negatively impact the background cleanup tasks in accounts-db. 
\ + Consider a smaller value.", ); } } @@ -1255,13 +1144,13 @@ fn new_snapshot_config( .any(|account_path| account_path == &snapshots_dir) { Err( - "the --accounts and --snapshots paths must be unique since they \ - both create 'snapshots' subdirectories, otherwise there may be collisions" + "the --accounts and --snapshots paths must be unique since they both create \ + 'snapshots' subdirectories, otherwise there may be collisions" .to_string(), )?; } - let bank_snapshots_dir = snapshots_dir.join("snapshots"); + let bank_snapshots_dir = snapshots_dir.join(BANK_SNAPSHOTS_DIR); fs::create_dir_all(&bank_snapshots_dir).map_err(|err| { format!( "failed to create bank snapshots directory '{}': {err}", @@ -1348,61 +1237,11 @@ fn new_snapshot_config( if !is_snapshot_config_valid(&snapshot_config) { Err( - "invalid snapshot configuration provided: snapshot intervals are incompatible. \ - \n\t- full snapshot interval MUST be larger than incremental snapshot interval \ - (if enabled)" + "invalid snapshot configuration provided: snapshot intervals are incompatible. 
full \ + snapshot interval MUST be larger than incremental snapshot interval (if enabled)" .to_string(), )?; } Ok(snapshot_config) } - -fn process_account_indexes(matches: &ArgMatches) -> AccountSecondaryIndexes { - let account_indexes: HashSet = matches - .values_of("account_indexes") - .unwrap_or_default() - .map(|value| match value { - "program-id" => AccountIndex::ProgramId, - "spl-token-mint" => AccountIndex::SplTokenMint, - "spl-token-owner" => AccountIndex::SplTokenOwner, - _ => unreachable!(), - }) - .collect(); - - let account_indexes_include_keys: HashSet = - values_t!(matches, "account_index_include_key", Pubkey) - .unwrap_or_default() - .iter() - .cloned() - .collect(); - - let account_indexes_exclude_keys: HashSet = - values_t!(matches, "account_index_exclude_key", Pubkey) - .unwrap_or_default() - .iter() - .cloned() - .collect(); - - let exclude_keys = !account_indexes_exclude_keys.is_empty(); - let include_keys = !account_indexes_include_keys.is_empty(); - - let keys = if !account_indexes.is_empty() && (exclude_keys || include_keys) { - let account_indexes_keys = AccountSecondaryIndexesIncludeExclude { - exclude: exclude_keys, - keys: if exclude_keys { - account_indexes_exclude_keys - } else { - account_indexes_include_keys - }, - }; - Some(account_indexes_keys) - } else { - None - }; - - AccountSecondaryIndexes { - keys, - indexes: account_indexes, - } -} diff --git a/validator/src/commands/set_public_address/mod.rs b/validator/src/commands/set_public_address/mod.rs index a01bb28c736632..d7c2d39b948829 100644 --- a/validator/src/commands/set_public_address/mod.rs +++ b/validator/src/commands/set_public_address/mod.rs @@ -17,21 +17,22 @@ pub struct SetPublicAddressArgs { impl FromClapArgMatches for SetPublicAddressArgs { fn from_clap_arg_match(matches: &ArgMatches) -> Result { - let parse_arg_addr = |arg_name: &str, - arg_long: &str| - -> std::result::Result< - Option, - Box, - > { - Ok(matches.value_of(arg_name).map(|host_port| { - 
solana_net_utils::parse_host_port(host_port).map_err(|err| { - format!( - "failed to parse --{arg_long} address. It must be in the HOST:PORT format. {err}" - ) - }) - }) - .transpose()?) - }; + let parse_arg_addr = + |arg_name: &str, + arg_long: &str| + -> std::result::Result, Box> { + Ok(matches + .value_of(arg_name) + .map(|host_port| { + solana_net_utils::parse_host_port(host_port).map_err(|err| { + format!( + "failed to parse --{arg_long} address. It must be in the \ + HOST:PORT format. {err}" + ) + }) + }) + .transpose()?) + }; Ok(SetPublicAddressArgs { tpu_addr: parse_arg_addr("tpu_addr", "tpu")?, tpu_forwards_addr: parse_arg_addr("tpu_forwards_addr", "tpu-forwards")?, diff --git a/validator/src/commands/staked_nodes_overrides/mod.rs b/validator/src/commands/staked_nodes_overrides/mod.rs index bdad30f13c4ade..5f9646caf33a75 100644 --- a/validator/src/commands/staked_nodes_overrides/mod.rs +++ b/validator/src/commands/staked_nodes_overrides/mod.rs @@ -34,11 +34,13 @@ pub fn command<'a>() -> App<'a, 'a> { .takes_value(true) .required(true) .help( - "Provide path to a file with custom overrides for stakes of specific validator identities.", + "Provide path to a file with custom overrides for stakes of specific \ + validator identities.", ), ) .after_help( - "Note: the new staked nodes overrides only applies to the currently running validator instance", + "Note: the new staked nodes overrides only applies to the currently running validator \ + instance", ) } diff --git a/validator/src/commands/wait_for_restart_window/mod.rs b/validator/src/commands/wait_for_restart_window/mod.rs index a451b6d2e2596c..49bab6f12396c4 100644 --- a/validator/src/commands/wait_for_restart_window/mod.rs +++ b/validator/src/commands/wait_for_restart_window/mod.rs @@ -58,9 +58,7 @@ pub(crate) fn command<'a>() -> App<'a, 'a> { .validator(is_parsable::) .value_name("MINUTES") .default_value(DEFAULT_MIN_IDLE_TIME) - .help( - "Minimum time that the validator should not be leader before 
restarting", - ), + .help("Minimum time that the validator should not be leader before restarting"), ) .arg( Arg::with_name("identity") @@ -90,7 +88,8 @@ pub(crate) fn command<'a>() -> App<'a, 'a> { .help("Skip health check"), ) .after_help( - "Note: If this command exits with a non-zero status then this not a good time for a restart", + "Note: If this command exits with a non-zero status then this not a good time for a \ + restart", ) } @@ -218,9 +217,9 @@ pub fn wait_for_restart_window( } if !leader_schedule.is_empty() && upcoming_idle_windows.is_empty() { return Err(format!( - "Validator has no idle window of at least {} slots. Largest idle window \ - for epoch {} is {} slots", - min_idle_slots, epoch_info.epoch, max_idle_window + "Validator has no idle window of at least {min_idle_slots} slots. Largest \ + idle window for epoch {} is {max_idle_window} slots", + epoch_info.epoch, ) .into()); } @@ -274,7 +273,7 @@ pub fn wait_for_restart_window( } None => format!( "Validator will be leader soon. 
Next leader slot is \ - {next_leader_slot}" + {next_leader_slot}" ), }) } diff --git a/validator/src/main.rs b/validator/src/main.rs index dfbcae815fe5f5..17d22b43c8bff3 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -27,7 +27,6 @@ pub fn main() { ("init", _) => commands::run::execute( &matches, solana_version, - &ledger_path, commands::run::execute::Operation::Initialize, ) .inspect_err(|err| error!("Failed to initialize validator: {err}")) @@ -35,7 +34,6 @@ pub fn main() { ("", _) | ("run", _) => commands::run::execute( &matches, solana_version, - &ledger_path, commands::run::execute::Operation::Run, ) .inspect_err(|err| error!("Failed to start validator: {err}")) @@ -74,6 +72,9 @@ pub fn main() { ("set-public-address", Some(subcommand_matches)) => { commands::set_public_address::execute(subcommand_matches, &ledger_path) } + ("manage-block-production", Some(subcommand_matches)) => { + commands::manage_block_production::execute(subcommand_matches, &ledger_path) + } _ => unreachable!(), } .unwrap_or_else(|err| { diff --git a/version/build.rs b/version/build.rs index d1ea1b5d1e15e5..7a03c0a85429ec 100644 --- a/version/build.rs +++ b/version/build.rs @@ -5,7 +5,7 @@ fn main() { if git_output.status.success() { if let Ok(git_commit_hash) = String::from_utf8(git_output.stdout) { let trimmed_hash = git_commit_hash.trim().to_string(); - println!("cargo:rustc-env=AGAVE_GIT_COMMIT_HASH={}", trimmed_hash); + println!("cargo:rustc-env=AGAVE_GIT_COMMIT_HASH={trimmed_hash}"); } } } diff --git a/vortexor/Cargo.toml b/vortexor/Cargo.toml index de3ea9a62c24b9..e2afbc9339cd5c 100644 --- a/vortexor/Cargo.toml +++ b/vortexor/Cargo.toml @@ -18,9 +18,11 @@ targets = ["x86_64-unknown-linux-gnu"] crate-type = ["lib"] name = "solana_vortexor" +[features] +dev-context-only-utils = [] + [dependencies] agave-banking-stage-ingress-types = { workspace = true } -async-channel = { workspace = true } bytes = { workspace = true } clap = { version = "4.5.31", features = 
["cargo", "derive", "error-context"] } crossbeam-channel = { workspace = true } @@ -62,11 +64,12 @@ solana-transaction-metrics-tracker = { workspace = true } solana-version = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } +tokio-util = { workspace = true } url = { workspace = true } x509-parser = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } -solana-local-cluster = { workspace = true } +solana-local-cluster = { workspace = true, features = ["dev-context-only-utils"] } solana-native-token = { workspace = true } solana-streamer = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/vortexor/src/main.rs b/vortexor/src/main.rs index d35cd499937471..e266efd0d41bbb 100644 --- a/vortexor/src/main.rs +++ b/vortexor/src/main.rs @@ -26,6 +26,7 @@ use { sync::{atomic::AtomicBool, Arc, RwLock}, time::Duration, }, + tokio_util::sync::CancellationToken, }; const DEFAULT_CHANNEL_SIZE: usize = 100_000; @@ -83,6 +84,7 @@ pub fn main() { let tpu_forward_address = args.tpu_forward_address; let max_streams_per_ms = args.max_streams_per_ms; let exit = Arc::new(AtomicBool::new(false)); + let cancel = CancellationToken::new(); // To be linked with the Tpu sigverify and forwarder service let (tpu_sender, tpu_receiver) = bounded(DEFAULT_CHANNEL_SIZE); let (tpu_fwd_sender, _tpu_fwd_receiver) = bounded(DEFAULT_CHANNEL_SIZE); @@ -202,7 +204,7 @@ pub fn main() { max_connections_per_ipaddr_per_min, tpu_coalesce, &identity_keypair, - exit, + cancel.clone(), ); vortexor.join().unwrap(); sigverify_stage.join().unwrap(); diff --git a/vortexor/src/vortexor.rs b/vortexor/src/vortexor.rs index 8a1669e5f726b6..9aa8fb8156f9c4 100644 --- a/vortexor/src/vortexor.rs +++ b/vortexor/src/vortexor.rs @@ -12,15 +12,16 @@ use { solana_quic_definitions::NotifyKeyUpdate, solana_streamer::{ nonblocking::quic::DEFAULT_WAIT_FOR_CHUNK_TIMEOUT, - quic::{spawn_server_multi, EndpointKeyUpdater, QuicServerParams}, 
+ quic::{spawn_server_with_cancel, EndpointKeyUpdater, QuicServerParams}, streamer::StakedNodes, }, std::{ net::{SocketAddr, UdpSocket}, - sync::{atomic::AtomicBool, Arc, Mutex, RwLock}, + sync::{Arc, Mutex, RwLock}, thread::{self, JoinHandle}, time::Duration, }, + tokio_util::sync::CancellationToken, }; pub struct TpuSockets { @@ -115,7 +116,7 @@ impl Vortexor { max_connections_per_ipaddr_per_min: u64, tpu_coalesce: Duration, identity_keypair: &Keypair, - exit: Arc, + cancel: CancellationToken, ) -> Self { let mut quic_server_params = QuicServerParams { max_connections_per_peer, @@ -133,15 +134,15 @@ impl Vortexor { tpu_quic_fwd, } = tpu_sockets; - let tpu_result = spawn_server_multi( + let tpu_result = spawn_server_with_cancel( "solVtxTpu", "quic_vortexor_tpu", tpu_quic, identity_keypair, tpu_sender.clone(), - exit.clone(), staked_nodes.clone(), quic_server_params.clone(), + cancel.clone(), ) .unwrap(); @@ -149,15 +150,15 @@ impl Vortexor { // for staked connections: quic_server_params.max_staked_connections = max_fwd_staked_connections; quic_server_params.max_unstaked_connections = max_fwd_unstaked_connections; - let tpu_fwd_result = spawn_server_multi( + let tpu_fwd_result = spawn_server_with_cancel( "solVtxTpuFwd", "quic_vortexor_tpu_forwards", tpu_quic_fwd, identity_keypair, tpu_fwd_sender, - exit.clone(), staked_nodes.clone(), quic_server_params, + cancel.clone(), ) .unwrap(); diff --git a/vortexor/tests/vortexor.rs b/vortexor/tests/vortexor.rs index 6a18f4788e589e..0b5437f66b30a8 100644 --- a/vortexor/tests/vortexor.rs +++ b/vortexor/tests/vortexor.rs @@ -33,6 +33,7 @@ use { }, time::Duration, }, + tokio_util::sync::CancellationToken, url::Url, }; @@ -42,7 +43,7 @@ async fn test_vortexor() { let bind_address = solana_net_utils::parse_host("127.0.0.1").expect("invalid bind_address"); let keypair = Keypair::new(); - let exit = Arc::new(AtomicBool::new(false)); + let cancel = CancellationToken::new(); let (tpu_sender, tpu_receiver) = unbounded(); let 
(tpu_fwd_sender, tpu_fwd_receiver) = unbounded(); @@ -77,13 +78,13 @@ async fn test_vortexor() { DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE, DEFAULT_TPU_COALESCE, &keypair, - exit.clone(), + cancel.clone(), ); check_multiple_streams(tpu_receiver, tpu_address, Some(&keypair)).await; check_multiple_streams(tpu_fwd_receiver, tpu_fwd_address, Some(&keypair)).await; - exit.store(true, Ordering::Relaxed); + cancel.cancel(); vortexor.join().unwrap(); } diff --git a/vote/benches/vote_account.rs b/vote/benches/vote_account.rs index 014ef37f8cba94..6e0d3e70d01d3d 100644 --- a/vote/benches/vote_account.rs +++ b/vote/benches/vote_account.rs @@ -4,13 +4,13 @@ use { solana_account::AccountSharedData, solana_pubkey::Pubkey, solana_vote::vote_account::VoteAccount, - solana_vote_interface::state::{VoteInit, VoteState, VoteStateVersions}, + solana_vote_interface::state::{VoteInit, VoteStateV3, VoteStateVersions}, }; fn new_rand_vote_account( rng: &mut R, node_pubkey: Option, -) -> (AccountSharedData, VoteState) { +) -> (AccountSharedData, VoteStateV3) { let vote_init = VoteInit { node_pubkey: node_pubkey.unwrap_or_else(Pubkey::new_unique), authorized_voter: Pubkey::new_unique(), @@ -24,11 +24,11 @@ fn new_rand_vote_account( leader_schedule_epoch: rng.gen(), unix_timestamp: rng.gen(), }; - let mut vote_state = VoteState::new(&vote_init, &clock); + let mut vote_state = VoteStateV3::new(&vote_init, &clock); vote_state.process_next_vote_slot(0, 0, 1); let account = AccountSharedData::new_data( rng.gen(), // lamports - &VoteStateVersions::new_current(vote_state.clone()), + &VoteStateVersions::new_v3(vote_state.clone()), &solana_sdk_ids::vote::id(), // owner ) .unwrap(); diff --git a/vote/src/vote_account.rs b/vote/src/vote_account.rs index 55204a8ef3ce9f..9c150c581108fe 100644 --- a/vote/src/vote_account.rs +++ b/vote/src/vote_account.rs @@ -96,7 +96,7 @@ impl VoteAccount { use { rand::Rng as _, solana_clock::Clock, - solana_vote_interface::state::{VoteInit, VoteState, 
VoteStateVersions}, + solana_vote_interface::state::{VoteInit, VoteStateV3, VoteStateVersions}, }; let mut rng = rand::thread_rng(); @@ -114,10 +114,10 @@ impl VoteAccount { leader_schedule_epoch: rng.gen(), unix_timestamp: rng.gen(), }; - let vote_state = VoteState::new(&vote_init, &clock); + let vote_state = VoteStateV3::new(&vote_init, &clock); let account = AccountSharedData::new_data( rng.gen(), // lamports - &VoteStateVersions::new_current(vote_state.clone()), + &VoteStateVersions::new_v3(vote_state.clone()), &solana_sdk_ids::vote::id(), // owner ) .unwrap(); @@ -453,7 +453,7 @@ mod tests { solana_account::WritableAccount, solana_clock::Clock, solana_pubkey::Pubkey, - solana_vote_interface::state::{VoteInit, VoteState, VoteStateVersions}, + solana_vote_interface::state::{VoteInit, VoteStateV3, VoteStateVersions}, std::iter::repeat_with, }; @@ -474,10 +474,10 @@ mod tests { leader_schedule_epoch: rng.gen(), unix_timestamp: rng.gen(), }; - let vote_state = VoteState::new(&vote_init, &clock); + let vote_state = VoteStateV3::new(&vote_init, &clock); AccountSharedData::new_data( rng.gen(), // lamports - &VoteStateVersions::new_current(vote_state.clone()), + &VoteStateVersions::new_v3(vote_state.clone()), &solana_sdk_ids::vote::id(), // owner ) .unwrap() diff --git a/vote/src/vote_state_view.rs b/vote/src/vote_state_view.rs index b4c6dc313a2a67..b08016da4b59d4 100644 --- a/vote/src/vote_state_view.rs +++ b/vote/src/vote_state_view.rs @@ -1,7 +1,8 @@ use { self::{ field_frames::{ - AuthorizedVotersListFrame, EpochCreditsItem, EpochCreditsListFrame, RootSlotFrame, + AuthorizedVotersListFrame, BlsPubkeyCompressedFrame, BlsPubkeyCompressedView, + EpochCreditsItem, EpochCreditsListFrame, PendingDelegatorRewardsView, RootSlotFrame, RootSlotView, VotesFrame, }, frame_v1_14_11::VoteStateFrameV1_14_11, @@ -9,20 +10,23 @@ use { list_view::ListView, }, core::fmt::Debug, + field_frames::{CommissionFrame, CommissionView}, + frame_v4::VoteStateFrameV4, solana_clock::{Epoch, 
Slot}, solana_pubkey::Pubkey, - solana_vote_interface::state::{BlockTimestamp, Lockout}, + solana_vote_interface::state::{BlockTimestamp, Lockout, BLS_PUBLIC_KEY_COMPRESSED_SIZE}, std::sync::Arc, }; #[cfg(feature = "dev-context-only-utils")] use { bincode, - solana_vote_interface::state::{VoteState, VoteStateVersions}, + solana_vote_interface::state::{VoteStateV3, VoteStateVersions}, }; mod field_frames; mod frame_v1_14_11; mod frame_v3; +mod frame_v4; mod list_view; #[derive(Debug, PartialEq, Eq)] @@ -30,6 +34,7 @@ pub enum VoteStateViewError { AccountDataTooSmall, InvalidVotesLength, InvalidRootSlotOption, + InvalidBlsPubkeyCompressedOption, InvalidAuthorizedVotersLength, InvalidEpochCreditsLength, OldVersion, @@ -48,6 +53,14 @@ enum Field { LastTimestamp, } +enum Simd185Field { + InflationRewardsCollector, + BlockRevenueCollector, + BlockRevenueCommission, + PendingDelegatorRewards, + BlsPubkeyCompressed, +} + /// A view into a serialized VoteState. /// /// This struct provides access to the VoteState data without @@ -73,9 +86,45 @@ impl VoteStateView { } pub fn commission(&self) -> u8 { - let offset = self.frame.offset(Field::Commission); + self.inflation_rewards_commission_view() + .commission_percent() + } + + pub fn block_revenue_collector(&self) -> Option<&Pubkey> { + let offset = self + .frame + .simd185_field_offset(Simd185Field::BlockRevenueCollector)?; // SAFETY: `frame` was created from `data`. - self.data[offset] + unsafe { Some(&*(self.data.as_ptr().add(offset) as *const Pubkey)) } + } + + pub fn inflation_rewards_collector(&self) -> Option<&Pubkey> { + let offset = self + .frame + .simd185_field_offset(Simd185Field::InflationRewardsCollector)?; + // SAFETY: `frame` was created from `data`. 
+ unsafe { Some(&*(self.data.as_ptr().add(offset) as *const Pubkey)) } + } + + pub fn inflation_rewards_commission(&self) -> u16 { + self.inflation_rewards_commission_view().commission_bps() + } + + pub fn block_revenue_commission(&self) -> u16 { + self.block_revenue_commission_view() + .map(|view| view.commission_bps()) + .unwrap_or(10_000) + } + + pub fn pending_delegator_rewards(&self) -> u64 { + self.pending_delegator_rewards_view() + .map(|view| view.value()) + .unwrap_or(0) + } + + pub fn bls_pubkey_compressed(&self) -> Option<[u8; BLS_PUBLIC_KEY_COMPRESSED_SIZE]> { + self.bls_pubkey_compressed_view() + .and_then(|view| view.pubkey()) } pub fn votes_iter(&self) -> impl Iterator + '_ { @@ -128,6 +177,40 @@ impl VoteStateView { } } + fn inflation_rewards_commission_view(&self) -> CommissionView { + let offset = self.frame.offset(Field::Commission); + // SAFETY: `frame` was created from `data`. + CommissionView::new(self.frame.commission_frame(), &self.data[offset..]) + } + + fn block_revenue_commission_view(&self) -> Option { + let offset = self + .frame + .simd185_field_offset(Simd185Field::BlockRevenueCommission)?; + // SAFETY: `frame` was created from `data`. + Some(CommissionView::new( + CommissionFrame::new_bps(), + &self.data[offset..], + )) + } + + fn pending_delegator_rewards_view(&self) -> Option { + let offset = self + .frame + .simd185_field_offset(Simd185Field::PendingDelegatorRewards)?; + // SAFETY: `frame` was created from `data`. + Some(PendingDelegatorRewardsView::new(&self.data[offset..])) + } + + fn bls_pubkey_compressed_view(&self) -> Option { + let offset = self + .frame + .simd185_field_offset(Simd185Field::BlsPubkeyCompressed)?; + let frame = self.frame.bls_pubkey_compressed_frame()?; + // SAFETY: `frame` was created from `data`. + Some(BlsPubkeyCompressedView::new(frame, &self.data[offset..])) + } + fn votes_view(&self) -> ListView { let offset = self.frame.offset(Field::Votes); // SAFETY: `frame` was created from `data`. 
@@ -154,10 +237,9 @@ impl VoteStateView { } #[cfg(feature = "dev-context-only-utils")] -impl From for VoteStateView { - fn from(vote_state: VoteState) -> Self { - let vote_account_data = - bincode::serialize(&VoteStateVersions::new_current(vote_state)).unwrap(); +impl From for VoteStateView { + fn from(vote_state: VoteStateV3) -> Self { + let vote_account_data = bincode::serialize(&VoteStateVersions::new_v3(vote_state)).unwrap(); VoteStateView::try_new(Arc::new(vote_account_data)).unwrap() } } @@ -167,6 +249,7 @@ impl From for VoteStateView { enum VoteStateFrame { V1_14_11(VoteStateFrameV1_14_11), V3(VoteStateFrameV3), + V4(VoteStateFrameV4), } impl VoteStateFrame { @@ -182,6 +265,7 @@ impl VoteStateFrame { 0 => return Err(VoteStateViewError::OldVersion), 1 => Self::V1_14_11(VoteStateFrameV1_14_11::try_new(bytes)?), 2 => Self::V3(VoteStateFrameV3::try_new(bytes)?), + 3 => Self::V4(VoteStateFrameV4::try_new(bytes)?), _ => return Err(VoteStateViewError::UnsupportedVersion), }) } @@ -190,6 +274,30 @@ impl VoteStateFrame { match &self { Self::V1_14_11(frame) => frame.field_offset(field), Self::V3(frame) => frame.field_offset(field), + Self::V4(frame) => frame.field_offset(field), + } + } + + fn simd185_field_offset(&self, field: Simd185Field) -> Option { + match &self { + Self::V1_14_11(_frame) => None, + Self::V3(_frame) => None, + Self::V4(frame) => Some(frame.simd185_field_offset(field)), + } + } + + fn commission_frame(&self) -> CommissionFrame { + match &self { + Self::V1_14_11(_) => CommissionFrame::new_percent(), + Self::V3(_) => CommissionFrame::new_percent(), + Self::V4(_) => CommissionFrame::new_bps(), + } + } + + fn bls_pubkey_compressed_frame(&self) -> Option { + match &self { + Self::V1_14_11 { .. } | Self::V3 { .. 
} => None, + Self::V4(frame) => Some(frame.bls_pubkey_compressed_frame), } } @@ -197,6 +305,7 @@ impl VoteStateFrame { match &self { Self::V1_14_11(frame) => VotesFrame::Lockout(frame.votes_frame), Self::V3(frame) => VotesFrame::Landed(frame.votes_frame), + Self::V4(frame) => VotesFrame::Landed(frame.votes_frame), } } @@ -204,6 +313,7 @@ impl VoteStateFrame { match &self { Self::V1_14_11(vote_frame) => vote_frame.root_slot_frame, Self::V3(vote_frame) => vote_frame.root_slot_frame, + Self::V4(vote_frame) => vote_frame.root_slot_frame, } } @@ -211,6 +321,7 @@ impl VoteStateFrame { match &self { Self::V1_14_11(frame) => frame.authorized_voters_frame, Self::V3(frame) => frame.authorized_voters_frame, + Self::V4(frame) => frame.authorized_voters_frame, } } @@ -218,6 +329,7 @@ impl VoteStateFrame { match &self { Self::V1_14_11(frame) => frame.epoch_credits_frame, Self::V3(frame) => frame.epoch_credits_frame, + Self::V4(frame) => frame.epoch_credits_frame, } } } @@ -231,15 +343,51 @@ mod tests { solana_vote_interface::{ authorized_voters::AuthorizedVoters, state::{ - vote_state_1_14_11::VoteState1_14_11, LandedVote, VoteInit, VoteState, + LandedVote, VoteInit, VoteState1_14_11, VoteStateV3, VoteStateV4, VoteStateVersions, MAX_EPOCH_CREDITS_HISTORY, MAX_LOCKOUT_HISTORY, }, }, std::collections::VecDeque, }; - fn new_test_vote_state() -> VoteState { - let mut target_vote_state = VoteState::new( + #[derive(Debug, Clone, Deserialize, Serialize)] + enum TestVoteStateVersions { + V0_23_5, + V1_14_11, + V3, + V4(Box), + } + + fn new_test_vote_state_v4() -> VoteStateV4 { + let votes = (0..MAX_LOCKOUT_HISTORY) + .map(|i| LandedVote { + latency: i as u8, + lockout: Lockout::new_with_confirmation_count(i as u64, i as u32), + }) + .collect(); + + VoteStateV4 { + node_pubkey: Pubkey::new_unique(), + authorized_withdrawer: Pubkey::new_unique(), + inflation_rewards_collector: Pubkey::new_unique(), + block_revenue_collector: Pubkey::new_unique(), + inflation_rewards_commission_bps: 42, + 
block_revenue_commission_bps: 42, + pending_delegator_rewards: 42, + bls_pubkey_compressed: Some([42; BLS_PUBLIC_KEY_COMPRESSED_SIZE]), + votes, + root_slot: Some(42), + authorized_voters: AuthorizedVoters::new(42, Pubkey::new_unique()), + epoch_credits: vec![(42, 42, 42)], + last_timestamp: BlockTimestamp { + slot: 42, + timestamp: 42, + }, + } + } + + fn new_test_vote_state_v3() -> VoteStateV3 { + let mut target_vote_state = VoteStateV3::new( &VoteInit { node_pubkey: Pubkey::new_unique(), authorized_voter: Pubkey::new_unique(), @@ -275,10 +423,54 @@ mod tests { } #[test] - fn test_vote_state_view_v3() { - let target_vote_state = new_test_vote_state(); + fn test_vote_state_view_v4() { + let target_vote_state = new_test_vote_state_v4(); let target_vote_state_versions = - VoteStateVersions::Current(Box::new(target_vote_state.clone())); + TestVoteStateVersions::V4(Box::new(target_vote_state.clone())); + let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap(); + let vote_state_view = VoteStateView::try_new(Arc::new(vote_state_buf)).unwrap(); + assert_eq_vote_state_v4(&vote_state_view, &target_vote_state); + } + + #[test] + fn test_vote_state_view_v4_default() { + let target_vote_state = VoteStateV4::default(); + let target_vote_state_versions = + TestVoteStateVersions::V4(Box::new(target_vote_state.clone())); + let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap(); + let vote_state_view = VoteStateView::try_new(Arc::new(vote_state_buf)).unwrap(); + assert_eq_vote_state_v4(&vote_state_view, &target_vote_state); + } + + #[test] + fn test_vote_state_view_v4_arbitrary() { + // variant + // provide 4x the minimum struct size in bytes to ensure we typically touch every field + let struct_bytes_x4 = VoteStateV3::size_of() * 4; + for _ in 0..100 { + let raw_data: Vec = (0..struct_bytes_x4).map(|_| rand::random::()).collect(); + let mut unstructured = Unstructured::new(&raw_data); + + let mut target_vote_state = 
VoteStateV4::arbitrary(&mut unstructured).unwrap(); + target_vote_state.votes.truncate(MAX_LOCKOUT_HISTORY); + target_vote_state + .epoch_credits + .truncate(MAX_EPOCH_CREDITS_HISTORY); + if target_vote_state.authorized_voters.len() >= u8::MAX as usize { + continue; + } + + let target_vote_state_versions = + TestVoteStateVersions::V4(Box::new(target_vote_state.clone())); + let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap(); + let vote_state_view = VoteStateView::try_new(Arc::new(vote_state_buf)).unwrap(); + assert_eq_vote_state_v4(&vote_state_view, &target_vote_state); + } + } + #[test] + fn test_vote_state_view_v3() { + let target_vote_state = new_test_vote_state_v3(); + let target_vote_state_versions = VoteStateVersions::V3(Box::new(target_vote_state.clone())); let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap(); let vote_state_view = VoteStateView::try_new(Arc::new(vote_state_buf)).unwrap(); assert_eq_vote_state_v3(&vote_state_view, &target_vote_state); @@ -286,9 +478,8 @@ mod tests { #[test] fn test_vote_state_view_v3_default() { - let target_vote_state = VoteState::default(); - let target_vote_state_versions = - VoteStateVersions::Current(Box::new(target_vote_state.clone())); + let target_vote_state = VoteStateV3::default(); + let target_vote_state_versions = VoteStateVersions::V3(Box::new(target_vote_state.clone())); let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap(); let vote_state_view = VoteStateView::try_new(Arc::new(vote_state_buf)).unwrap(); assert_eq_vote_state_v3(&vote_state_view, &target_vote_state); @@ -298,12 +489,12 @@ mod tests { fn test_vote_state_view_v3_arbitrary() { // variant // provide 4x the minimum struct size in bytes to ensure we typically touch every field - let struct_bytes_x4 = VoteState::size_of() * 4; + let struct_bytes_x4 = VoteStateV3::size_of() * 4; for _ in 0..100 { let raw_data: Vec = (0..struct_bytes_x4).map(|_| rand::random::()).collect(); let 
mut unstructured = Unstructured::new(&raw_data); - let mut target_vote_state = VoteState::arbitrary(&mut unstructured).unwrap(); + let mut target_vote_state = VoteStateV3::arbitrary(&mut unstructured).unwrap(); target_vote_state.votes.truncate(MAX_LOCKOUT_HISTORY); target_vote_state .epoch_credits @@ -313,7 +504,7 @@ mod tests { } let target_vote_state_versions = - VoteStateVersions::Current(Box::new(target_vote_state.clone())); + VoteStateVersions::V3(Box::new(target_vote_state.clone())); let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap(); let vote_state_view = VoteStateView::try_new(Arc::new(vote_state_buf)).unwrap(); assert_eq_vote_state_v3(&vote_state_view, &target_vote_state); @@ -322,7 +513,7 @@ mod tests { #[test] fn test_vote_state_view_1_14_11() { - let target_vote_state: VoteState1_14_11 = new_test_vote_state().into(); + let target_vote_state: VoteState1_14_11 = new_test_vote_state_v3().into(); let target_vote_state_versions = VoteStateVersions::V1_14_11(Box::new(target_vote_state.clone())); let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap(); @@ -371,7 +562,78 @@ mod tests { } } - fn assert_eq_vote_state_v3(vote_state_view: &VoteStateView, vote_state: &VoteState) { + fn assert_eq_vote_state_v4(vote_state_view: &VoteStateView, vote_state: &VoteStateV4) { + assert_eq!(vote_state_view.node_pubkey(), &vote_state.node_pubkey); + assert_eq!( + vote_state_view.inflation_rewards_collector(), + Some(&vote_state.inflation_rewards_collector) + ); + assert_eq!( + vote_state_view.block_revenue_collector(), + Some(&vote_state.block_revenue_collector) + ); + assert_eq!( + vote_state_view.inflation_rewards_commission(), + vote_state.inflation_rewards_commission_bps + ); + assert_eq!( + vote_state_view.block_revenue_commission(), + vote_state.block_revenue_commission_bps + ); + assert_eq!( + vote_state_view.pending_delegator_rewards(), + vote_state.pending_delegator_rewards + ); + assert_eq!( + 
vote_state_view.bls_pubkey_compressed(), + vote_state.bls_pubkey_compressed + ); + let view_votes = vote_state_view.votes_iter().collect::>(); + let state_votes = vote_state + .votes + .iter() + .map(|vote| vote.lockout) + .collect::>(); + assert_eq!(view_votes, state_votes); + assert_eq!(vote_state_view.root_slot(), vote_state.root_slot); + + if let Some((first_voter_epoch, first_voter)) = vote_state.authorized_voters.first() { + assert_eq!( + vote_state_view.get_authorized_voter(*first_voter_epoch), + Some(first_voter) + ); + + let (last_voter_epoch, last_voter) = vote_state.authorized_voters.last().unwrap(); + assert_eq!( + vote_state_view.get_authorized_voter(*last_voter_epoch), + Some(last_voter) + ); + assert_eq!( + vote_state_view.get_authorized_voter(u64::MAX), + Some(last_voter) + ); + } else { + assert_eq!(vote_state_view.get_authorized_voter(u64::MAX), None); + } + + assert_eq!( + vote_state_view.num_epoch_credits(), + vote_state.epoch_credits.len() + ); + let view_credits: Vec<(Epoch, u64, u64)> = vote_state_view + .epoch_credits_iter() + .map(Into::into) + .collect::>(); + assert_eq!(view_credits, vote_state.epoch_credits); + + assert_eq!( + vote_state_view.credits(), + vote_state.epoch_credits.last().map(|x| x.1).unwrap_or(0) + ); + assert_eq!(vote_state_view.last_timestamp(), vote_state.last_timestamp); + } + + fn assert_eq_vote_state_v3(vote_state_view: &VoteStateView, vote_state: &VoteStateV3) { assert_eq!(vote_state_view.node_pubkey(), &vote_state.node_pubkey); assert_eq!(vote_state_view.commission(), vote_state.commission); let view_votes = vote_state_view.votes_iter().collect::>(); @@ -499,7 +761,7 @@ mod tests { #[test] fn test_vote_state_view_unsupported_version() { - let vote_data = Arc::new(3u32.to_le_bytes().to_vec()); + let vote_data = Arc::new(4u32.to_le_bytes().to_vec()); let vote_state_view_err = VoteStateView::try_new(vote_data).unwrap_err(); assert_eq!(vote_state_view_err, VoteStateViewError::UnsupportedVersion); } diff --git 
a/vote/src/vote_state_view/field_frames.rs b/vote/src/vote_state_view/field_frames.rs index d25bdf57a683cd..dd1c49ba1169ba 100644 --- a/vote/src/vote_state_view/field_frames.rs +++ b/vote/src/vote_state_view/field_frames.rs @@ -2,7 +2,8 @@ use { super::{list_view::ListView, Result, VoteStateViewError}, solana_clock::{Epoch, Slot}, solana_pubkey::Pubkey, - std::io::BufRead, + solana_vote_interface::state::BLS_PUBLIC_KEY_COMPRESSED_SIZE, + std::io::{BufRead, Read}, }; pub(super) trait ListFrame { @@ -119,6 +120,65 @@ impl ListFrame for LockoutListFrame { } } +pub(super) struct BlsPubkeyCompressedView<'a> { + frame: BlsPubkeyCompressedFrame, + buffer: &'a [u8], +} + +impl<'a> BlsPubkeyCompressedView<'a> { + pub(super) fn new(frame: BlsPubkeyCompressedFrame, buffer: &'a [u8]) -> Self { + Self { frame, buffer } + } +} + +impl BlsPubkeyCompressedView<'_> { + pub(super) fn pubkey(&self) -> Option<[u8; BLS_PUBLIC_KEY_COMPRESSED_SIZE]> { + if !self.frame.has_pubkey { + None + } else { + let mut cursor = std::io::Cursor::new(self.buffer); + cursor.consume(1); + let mut buf = [0; BLS_PUBLIC_KEY_COMPRESSED_SIZE]; + cursor.read_exact(&mut buf).unwrap(); + Some(buf) + } + } +} + +#[derive(Debug, PartialEq, Clone, Copy)] +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +pub(super) struct BlsPubkeyCompressedFrame { + pub(super) has_pubkey: bool, +} + +impl BlsPubkeyCompressedFrame { + pub(super) fn total_size(&self) -> usize { + 1 + self.size() + } + + pub(super) fn size(&self) -> usize { + if self.has_pubkey { + BLS_PUBLIC_KEY_COMPRESSED_SIZE + } else { + 0 + } + } + + pub(super) fn read(cursor: &mut std::io::Cursor<&[u8]>) -> Result { + let byte = solana_serialize_utils::cursor::read_u8(cursor) + .map_err(|_err| VoteStateViewError::AccountDataTooSmall)?; + let has_pubkey = match byte { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(VoteStateViewError::InvalidBlsPubkeyCompressedOption), + }?; + + let frame = Self { has_pubkey }; + cursor.consume(frame.size()); + Ok(frame) 
+ } +} + #[derive(Debug, PartialEq, Clone, Copy)] #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] pub(super) struct LandedVotesListFrame { @@ -271,6 +331,68 @@ impl From<&EpochCreditsItem> for (Epoch, u64, u64) { (item.epoch(), item.credits(), item.prev_credits()) } } +pub(super) struct CommissionView<'a> { + frame: CommissionFrame, + buffer: &'a [u8], +} + +impl<'a> CommissionView<'a> { + pub(super) fn new(frame: CommissionFrame, buffer: &'a [u8]) -> Self { + Self { frame, buffer } + } +} + +impl CommissionView<'_> { + pub(super) fn commission_percent(&self) -> u8 { + if !self.frame.use_bps { + self.buffer[0] + } else { + let data = unsafe { *(self.buffer.as_ptr() as *const [u8; 2]) }; + let bps = u16::from_le_bytes(data); + let percent = (bps / 100).min(u8::MAX as u16); + percent as u8 + } + } + + pub(super) fn commission_bps(&self) -> u16 { + if !self.frame.use_bps { + 100 * self.buffer[0] as u16 + } else { + let data = unsafe { *(self.buffer.as_ptr() as *const [u8; 2]) }; + u16::from_le_bytes(data) + } + } +} + +pub(super) struct CommissionFrame { + use_bps: bool, +} + +impl CommissionFrame { + pub(super) const fn new_percent() -> Self { + Self { use_bps: false } + } + pub(super) const fn new_bps() -> Self { + Self { use_bps: true } + } +} + +pub(super) struct PendingDelegatorRewardsView<'a> { + buffer: &'a [u8], +} + +impl<'a> PendingDelegatorRewardsView<'a> { + pub(super) fn new(buffer: &'a [u8]) -> Self { + Self { buffer } + } +} + +impl PendingDelegatorRewardsView<'_> { + pub(super) fn value(&self) -> u64 { + let data = unsafe { *(self.buffer.as_ptr() as *const [u8; 8]) }; + u64::from_le_bytes(data) + } +} pub(super) struct RootSlotView<'a> { frame: RootSlotFrame, @@ -347,6 +469,19 @@ impl PriorVotersFrame { mod tests { use {super::*, solana_vote_interface::state::CircBuf}; + #[test] + fn test_bls_pubkey_view() { + let frame = BlsPubkeyCompressedFrame { has_pubkey: true }; + let buffer = [1; 49]; // 1 byte for has_pubkey + 48 bytes for the pubkey + 
let view = BlsPubkeyCompressedView::new(frame, &buffer); + assert!(view.pubkey().is_some()); + + let frame = BlsPubkeyCompressedFrame { has_pubkey: false }; + let buffer = [0; 1]; + let view = BlsPubkeyCompressedView::new(frame, &buffer); + assert!(view.pubkey().is_none()); + } + #[test] fn test_prior_voters_total_size() { #[repr(C)] @@ -362,4 +497,36 @@ mod tests { core::mem::size_of::() /* is_empty */; assert_eq!(PriorVotersFrame::total_size(), expected_total_size); } + + #[test] + fn test_commission_view() { + let frame = CommissionFrame::new_percent(); + let buffer = [0; 1]; + let commission_view = CommissionView::new(frame, &buffer); + assert_eq!(commission_view.commission_percent(), 0); + + // base case + let frame = CommissionFrame::new_bps(); + let buffer = [0, 0]; + let commission_view = CommissionView::new(frame, &buffer); + assert_eq!(commission_view.commission_percent(), 0); + + // 1% commission + let frame = CommissionFrame::new_bps(); + let buffer = 100u16.to_le_bytes(); + let commission_view = CommissionView::new(frame, &buffer); + assert_eq!(commission_view.commission_percent(), 1); + + // round down to 1% + let frame = CommissionFrame::new_bps(); + let buffer = 101u16.to_le_bytes(); + let commission_view = CommissionView::new(frame, &buffer); + assert_eq!(commission_view.commission_percent(), 1); + + // over u8 max + let frame = CommissionFrame::new_bps(); + let buffer = u16::MAX.to_le_bytes(); + let commission_view = CommissionView::new(frame, &buffer); + assert_eq!(commission_view.commission_percent(), u8::MAX); + } } diff --git a/vote/src/vote_state_view/frame_v1_14_11.rs b/vote/src/vote_state_view/frame_v1_14_11.rs index d35b1b4260a169..eb64fb0cbb8411 100644 --- a/vote/src/vote_state_view/frame_v1_14_11.rs +++ b/vote/src/vote_state_view/frame_v1_14_11.rs @@ -100,7 +100,7 @@ mod tests { super::*, solana_clock::Clock, solana_vote_interface::state::{ - LandedVote, Lockout, VoteInit, VoteState, VoteState1_14_11, VoteStateVersions, + LandedVote, 
Lockout, VoteInit, VoteState1_14_11, VoteStateV3, VoteStateVersions, }, }; @@ -138,7 +138,7 @@ mod tests { #[test] fn test_try_new_simple() { - let mut target_vote_state = VoteState::new(&VoteInit::default(), &Clock::default()); + let mut target_vote_state = VoteStateV3::new(&VoteInit::default(), &Clock::default()); target_vote_state.root_slot = Some(42); target_vote_state.epoch_credits.push((1, 2, 3)); target_vote_state.votes.push_back(LandedVote { diff --git a/vote/src/vote_state_view/frame_v3.rs b/vote/src/vote_state_view/frame_v3.rs index 69fe1b434b9bed..b3313c84971f78 100644 --- a/vote/src/vote_state_view/frame_v3.rs +++ b/vote/src/vote_state_view/frame_v3.rs @@ -101,14 +101,14 @@ mod tests { super::*, solana_clock::Clock, solana_vote_interface::state::{ - LandedVote, Lockout, VoteInit, VoteState, VoteStateVersions, + LandedVote, Lockout, VoteInit, VoteStateV3, VoteStateVersions, }, }; #[test] fn test_try_new_zeroed() { - let target_vote_state = VoteState::default(); - let target_vote_state_versions = VoteStateVersions::Current(Box::new(target_vote_state)); + let target_vote_state = VoteStateV3::default(); + let target_vote_state_versions = VoteStateVersions::V3(Box::new(target_vote_state)); let mut bytes = bincode::serialize(&target_vote_state_versions).unwrap(); for i in 0..bytes.len() { @@ -139,7 +139,7 @@ mod tests { #[test] fn test_try_new_simple() { - let mut target_vote_state = VoteState::new(&VoteInit::default(), &Clock::default()); + let mut target_vote_state = VoteStateV3::new(&VoteInit::default(), &Clock::default()); target_vote_state.root_slot = Some(42); target_vote_state.epoch_credits.push((1, 2, 3)); target_vote_state.votes.push_back(LandedVote { @@ -147,7 +147,7 @@ mod tests { lockout: Lockout::default(), }); - let target_vote_state_versions = VoteStateVersions::Current(Box::new(target_vote_state)); + let target_vote_state_versions = VoteStateVersions::V3(Box::new(target_vote_state)); let mut bytes = 
bincode::serialize(&target_vote_state_versions).unwrap(); for i in 0..bytes.len() { diff --git a/vote/src/vote_state_view/frame_v4.rs b/vote/src/vote_state_view/frame_v4.rs new file mode 100644 index 00000000000000..9c9d6efbb07cf0 --- /dev/null +++ b/vote/src/vote_state_view/frame_v4.rs @@ -0,0 +1,280 @@ +use { + super::{ + field_frames::{BlsPubkeyCompressedFrame, LandedVotesListFrame, ListFrame}, + AuthorizedVotersListFrame, EpochCreditsListFrame, Field, Result, RootSlotFrame, + Simd185Field, VoteStateViewError, + }, + solana_pubkey::Pubkey, + solana_vote_interface::state::BlockTimestamp, + std::io::BufRead, +}; + +#[derive(Debug, PartialEq, Clone)] +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +pub(crate) struct VoteStateFrameV4 { + pub(super) bls_pubkey_compressed_frame: BlsPubkeyCompressedFrame, + pub(super) votes_frame: LandedVotesListFrame, + pub(super) root_slot_frame: RootSlotFrame, + pub(super) authorized_voters_frame: AuthorizedVotersListFrame, + pub(super) epoch_credits_frame: EpochCreditsListFrame, +} + +impl VoteStateFrameV4 { + pub(crate) fn try_new(bytes: &[u8]) -> Result { + let bls_pubkey_offset = Self::bls_pubkey_compressed_offset(); + let mut cursor = std::io::Cursor::new(bytes); + cursor.set_position(bls_pubkey_offset as u64); + + let bls_pubkey_compressed_frame = BlsPubkeyCompressedFrame::read(&mut cursor)?; + let votes_frame = LandedVotesListFrame::read(&mut cursor)?; + let root_slot_frame = RootSlotFrame::read(&mut cursor)?; + let authorized_voters_frame = AuthorizedVotersListFrame::read(&mut cursor)?; + let epoch_credits_frame = EpochCreditsListFrame::read(&mut cursor)?; + cursor.consume(core::mem::size_of::()); + if cursor.position() as usize <= bytes.len() { + Ok(Self { + bls_pubkey_compressed_frame, + votes_frame, + root_slot_frame, + authorized_voters_frame, + epoch_credits_frame, + }) + } else { + Err(VoteStateViewError::AccountDataTooSmall) + } + } + + pub(super) fn field_offset(&self, field: Field) -> usize { + match field 
{ + Field::NodePubkey => Self::node_pubkey_offset(), + Field::Commission => Self::inflation_rewards_commission_offset(), + Field::Votes => self.votes_offset(), + Field::RootSlot => self.root_slot_offset(), + Field::AuthorizedVoters => self.authorized_voters_offset(), + Field::EpochCredits => self.epoch_credits_offset(), + Field::LastTimestamp => self.last_timestamp_offset(), + } + } + + pub(super) fn simd185_field_offset(&self, field: Simd185Field) -> usize { + match field { + Simd185Field::InflationRewardsCollector => Self::inflation_rewards_collector_offset(), + Simd185Field::BlockRevenueCollector => Self::block_revenue_collector_offset(), + Simd185Field::BlockRevenueCommission => Self::block_revenue_commission_offset(), + Simd185Field::PendingDelegatorRewards => Self::pending_delegator_rewards_offset(), + Simd185Field::BlsPubkeyCompressed => Self::bls_pubkey_compressed_offset(), + } + } + + const fn node_pubkey_offset() -> usize { + core::mem::size_of::() // version + } + + const fn authorized_withdrawer_offset() -> usize { + Self::node_pubkey_offset() + core::mem::size_of::() + } + + const fn inflation_rewards_collector_offset() -> usize { + Self::authorized_withdrawer_offset() + core::mem::size_of::() + } + + const fn block_revenue_collector_offset() -> usize { + Self::inflation_rewards_collector_offset() + core::mem::size_of::() + } + + const fn inflation_rewards_commission_offset() -> usize { + Self::block_revenue_collector_offset() + core::mem::size_of::() + } + + const fn block_revenue_commission_offset() -> usize { + Self::inflation_rewards_commission_offset() + core::mem::size_of::() + } + + const fn pending_delegator_rewards_offset() -> usize { + Self::block_revenue_commission_offset() + core::mem::size_of::() + } + + const fn bls_pubkey_compressed_offset() -> usize { + Self::pending_delegator_rewards_offset() + core::mem::size_of::() + } + + fn votes_offset(&self) -> usize { + Self::bls_pubkey_compressed_offset() + 
self.bls_pubkey_compressed_frame.total_size() + } + + fn root_slot_offset(&self) -> usize { + self.votes_offset() + self.votes_frame.total_size() + } + + fn authorized_voters_offset(&self) -> usize { + self.root_slot_offset() + self.root_slot_frame.total_size() + } + + fn epoch_credits_offset(&self) -> usize { + self.authorized_voters_offset() + self.authorized_voters_frame.total_size() + } + + fn last_timestamp_offset(&self) -> usize { + self.epoch_credits_offset() + self.epoch_credits_frame.total_size() + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_vote_interface::{ + authorized_voters::AuthorizedVoters, + state::{LandedVote, Lockout, VoteStateV4, BLS_PUBLIC_KEY_COMPRESSED_SIZE}, + }, + std::collections::VecDeque, + }; + + #[derive(Debug, Clone, Deserialize, Serialize)] + enum TestVoteStateVersions { + V0_23_5, + V1_14_11, + V3, + V4(VoteStateV4), + } + + #[test] + fn test_try_new_zeroed() { + let target_vote_state = VoteStateV4::default(); + let target_vote_state_versions = TestVoteStateVersions::V4(target_vote_state); + let mut bytes = bincode::serialize(&target_vote_state_versions).unwrap(); + + for i in 0..bytes.len() { + let vote_state_frame = VoteStateFrameV4::try_new(&bytes[..i]); + assert_eq!( + vote_state_frame, + Err(VoteStateViewError::AccountDataTooSmall) + ); + } + + for has_trailing_bytes in [false, true] { + if has_trailing_bytes { + bytes.extend_from_slice(&[0; 42]); + } + assert_eq!( + VoteStateFrameV4::try_new(&bytes), + Ok(VoteStateFrameV4 { + bls_pubkey_compressed_frame: BlsPubkeyCompressedFrame { has_pubkey: false }, + votes_frame: LandedVotesListFrame { len: 0 }, + root_slot_frame: RootSlotFrame { + has_root_slot: false, + }, + authorized_voters_frame: AuthorizedVotersListFrame { len: 0 }, + epoch_credits_frame: EpochCreditsListFrame { len: 0 }, + }) + ); + } + } + + #[test] + fn test_try_new_simple() { + let target_vote_state = VoteStateV4 { + authorized_voters: AuthorizedVoters::new(0, Pubkey::default()), + 
epoch_credits: vec![(1, 2, 3)], + bls_pubkey_compressed: Some([42; BLS_PUBLIC_KEY_COMPRESSED_SIZE]), + votes: VecDeque::from([LandedVote { + latency: 0, + lockout: Lockout::default(), + }]), + root_slot: Some(42), + ..VoteStateV4::default() + }; + + let target_vote_state_versions = TestVoteStateVersions::V4(target_vote_state); + let mut bytes = bincode::serialize(&target_vote_state_versions).unwrap(); + + for i in 0..bytes.len() { + let vote_state_frame = VoteStateFrameV4::try_new(&bytes[..i]); + assert_eq!( + vote_state_frame, + Err(VoteStateViewError::AccountDataTooSmall) + ); + } + + for has_trailing_bytes in [false, true] { + if has_trailing_bytes { + bytes.extend_from_slice(&[0; 42]); + } + assert_eq!( + VoteStateFrameV4::try_new(&bytes), + Ok(VoteStateFrameV4 { + bls_pubkey_compressed_frame: BlsPubkeyCompressedFrame { has_pubkey: true }, + votes_frame: LandedVotesListFrame { len: 1 }, + root_slot_frame: RootSlotFrame { + has_root_slot: true, + }, + authorized_voters_frame: AuthorizedVotersListFrame { len: 1 }, + epoch_credits_frame: EpochCreditsListFrame { len: 1 }, + }) + ); + } + } + + #[test] + fn test_try_new_invalid_values() { + let mut bytes = vec![0; VoteStateFrameV4::bls_pubkey_compressed_offset()]; + + { + let mut bytes = bytes.clone(); + bytes.extend_from_slice(&(2u8.to_le_bytes())); + let vote_state_frame = VoteStateFrameV4::try_new(&bytes); + assert_eq!( + vote_state_frame, + Err(VoteStateViewError::InvalidBlsPubkeyCompressedOption) + ); + } + + bytes.extend_from_slice(&[0; 1]); + + { + let mut bytes = bytes.clone(); + bytes.extend_from_slice(&(256u64.to_le_bytes())); + let vote_state_frame = VoteStateFrameV4::try_new(&bytes); + assert_eq!( + vote_state_frame, + Err(VoteStateViewError::InvalidVotesLength) + ); + } + + bytes.extend_from_slice(&[0; core::mem::size_of::()]); + + { + let mut bytes = bytes.clone(); + bytes.extend_from_slice(&(2u8.to_le_bytes())); + let vote_state_frame = VoteStateFrameV4::try_new(&bytes); + assert_eq!( + 
vote_state_frame, + Err(VoteStateViewError::InvalidRootSlotOption) + ); + } + + bytes.extend_from_slice(&[0; 1]); + + { + let mut bytes = bytes.clone(); + bytes.extend_from_slice(&(256u64.to_le_bytes())); + let vote_state_frame = VoteStateFrameV4::try_new(&bytes); + assert_eq!( + vote_state_frame, + Err(VoteStateViewError::InvalidAuthorizedVotersLength) + ); + } + + bytes.extend_from_slice(&[0; core::mem::size_of::()]); + + { + let mut bytes = bytes.clone(); + bytes.extend_from_slice(&(256u64.to_le_bytes())); + let vote_state_frame = VoteStateFrameV4::try_new(&bytes); + assert_eq!( + vote_state_frame, + Err(VoteStateViewError::InvalidEpochCreditsLength) + ); + } + } +} diff --git a/vote/src/vote_transaction.rs b/vote/src/vote_transaction.rs index bc5752fa62025e..1a396fc4c294cc 100644 --- a/vote/src/vote_transaction.rs +++ b/vote/src/vote_transaction.rs @@ -11,7 +11,7 @@ use { #[cfg_attr( feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor), - frozen_abi(digest = "FpMQMRgU1GJS1jyt69r2aHYRa8etuhzNkcDiw5oKtCiv") + frozen_abi(digest = "3FNgMe2aFiwfwo97HvdFKGBXHK3PT1M87gA8utHY5VEG") )] #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub enum VoteTransaction { diff --git a/votor-messages/Cargo.toml b/votor-messages/Cargo.toml new file mode 100644 index 00000000000000..2e0c003aba9880 --- /dev/null +++ b/votor-messages/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "solana-votor-messages" +documentation = "https://docs.rs/solana-votor-messages" +readme = "../README.md" +version = { workspace = true } +authors = { workspace = true } +description = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[features] +frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] + +[dependencies] +serde = { workspace = true } +solana-bls-signatures = { workspace = true, features = [ + "bytemuck", "solana-signer-derive", +] } +solana-clock = { 
workspace = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-hash = { workspace = true, features = ["serde"] } +solana-logger = { workspace = true } + +[lints] +workspace = true diff --git a/votor-messages/src/consensus_message.rs b/votor-messages/src/consensus_message.rs new file mode 100644 index 00000000000000..9a69f8dd6af66a --- /dev/null +++ b/votor-messages/src/consensus_message.rs @@ -0,0 +1,193 @@ +//! Put Alpenglow consensus messages here so all clients can agree on the format. +use { + crate::vote::Vote, + serde::{Deserialize, Serialize}, + solana_bls_signatures::Signature as BLSSignature, + solana_clock::Slot, + solana_hash::Hash, +}; + +/// The seed used to derive the BLS keypair +pub const BLS_KEYPAIR_DERIVE_SEED: &[u8; 9] = b"alpenglow"; + +/// Block, a (slot, hash) tuple +pub type Block = (Slot, Hash); + +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "HXCJbLTULqcqGqs5WBG31YPJQiVzMm6qYWKPzb1Uhb14") +)] +#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] +/// BLS vote message, we need rank to look up pubkey +pub struct VoteMessage { + /// The vote + pub vote: Vote, + /// The signature + pub signature: BLSSignature, + /// The rank of the validator + pub rank: u16, +} + +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample, AbiEnumVisitor), + frozen_abi(digest = "APmpbbqEiJtCrxgjSs8FuMNcM1Qyzc5HtMW7KR79DGcF") +)] +/// Certificate details +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)] +pub enum Certificate { + /// Finalize certificate + Finalize(Slot), + /// Fast finalize certificate + FinalizeFast(Slot, Hash), + /// Notarize certificate + Notarize(Slot, Hash), + /// Notarize fallback certificate + NotarizeFallback(Slot, Hash), + /// Skip certificate + Skip(Slot), +} + +#[cfg_attr( + feature = 
"frozen-abi", + derive(AbiExample, AbiEnumVisitor), + frozen_abi(digest = "3en2tmFekuD3SWbBnNPqeJSrxDeTJkKJe3CCimANrrpQ") +)] +/// Certificate type +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)] +pub enum CertificateType { + /// Finalize certificate + Finalize, + /// Fast finalize certificate + FinalizeFast, + /// Notarize certificate + Notarize, + /// Notarize fallback certificate + NotarizeFallback, + /// Skip certificate + Skip, +} + +impl Certificate { + /// Create a new certificate from a CertificateType, Slot, and Option + pub fn new(certificate_type: CertificateType, slot: Slot, hash: Option) -> Self { + match (certificate_type, hash) { + (CertificateType::Finalize, None) => Certificate::Finalize(slot), + (CertificateType::FinalizeFast, Some(hash)) => Certificate::FinalizeFast(slot, hash), + (CertificateType::Notarize, Some(hash)) => Certificate::Notarize(slot, hash), + (CertificateType::NotarizeFallback, Some(hash)) => { + Certificate::NotarizeFallback(slot, hash) + } + (CertificateType::Skip, None) => Certificate::Skip(slot), + _ => panic!("Invalid certificate type and hash combination"), + } + } + + /// Get the certificate type + pub fn certificate_type(&self) -> CertificateType { + match self { + Certificate::Finalize(_) => CertificateType::Finalize, + Certificate::FinalizeFast(_, _) => CertificateType::FinalizeFast, + Certificate::Notarize(_, _) => CertificateType::Notarize, + Certificate::NotarizeFallback(_, _) => CertificateType::NotarizeFallback, + Certificate::Skip(_) => CertificateType::Skip, + } + } + + /// Get the slot of the certificate + pub fn slot(&self) -> Slot { + match self { + Certificate::Finalize(slot) + | Certificate::FinalizeFast(slot, _) + | Certificate::Notarize(slot, _) + | Certificate::NotarizeFallback(slot, _) + | Certificate::Skip(slot) => *slot, + } + } + + /// Is this a fast finalize certificate? 
+ pub fn is_fast_finalization(&self) -> bool { + matches!(self, Self::FinalizeFast(_, _)) + } + + /// Is this a finalize / fast finalize certificate? + pub fn is_finalization(&self) -> bool { + matches!(self, Self::Finalize(_) | Self::FinalizeFast(_, _)) + } + + /// Is this a notarize fallback certificate? + pub fn is_notarize_fallback(&self) -> bool { + matches!(self, Self::NotarizeFallback(_, _)) + } + + /// Is this a skip certificate? + pub fn is_skip(&self) -> bool { + matches!(self, Self::Skip(_)) + } + + /// Gets the block associated with this certificate, if present + pub fn to_block(self) -> Option { + match self { + Certificate::Finalize(_) | Certificate::Skip(_) => None, + Certificate::Notarize(slot, block_id) + | Certificate::NotarizeFallback(slot, block_id) + | Certificate::FinalizeFast(slot, block_id) => Some((slot, block_id)), + } + } +} + +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "GHpyrcTVqbJxBSMEdsjod8iN6fUatmDUbUqaBSfd1DRv") +)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +/// BLS vote message, we need rank to look up pubkey +pub struct CertificateMessage { + /// The certificate + pub certificate: Certificate, + /// The signature + pub signature: BLSSignature, + /// The bitmap for validators, see solana-signer-store for encoding format + pub bitmap: Vec, +} + +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample, AbiEnumVisitor), + frozen_abi(digest = "F8MaTvE1eMbVnRHerBhiwo6nuWg7gjpM19FHdggoZyiA") +)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[allow(clippy::large_enum_variant)] +/// BLS message data in Alpenglow +pub enum ConsensusMessage { + /// Vote message, with the vote and the rank of the validator. 
+ Vote(VoteMessage), + /// Certificate message + Certificate(CertificateMessage), +} + +impl ConsensusMessage { + /// Create a new vote message + pub fn new_vote(vote: Vote, signature: BLSSignature, rank: u16) -> Self { + Self::Vote(VoteMessage { + vote, + signature, + rank, + }) + } + + /// Create a new certificate message + pub fn new_certificate( + certificate: Certificate, + bitmap: Vec, + signature: BLSSignature, + ) -> Self { + Self::Certificate(CertificateMessage { + certificate, + signature, + bitmap, + }) + } +} diff --git a/votor-messages/src/lib.rs b/votor-messages/src/lib.rs new file mode 100644 index 00000000000000..ba2ee9995d3c7c --- /dev/null +++ b/votor-messages/src/lib.rs @@ -0,0 +1,10 @@ +//! Alpenglow vote message types +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] +#![deny(missing_docs)] + +pub mod consensus_message; +pub mod vote; + +#[cfg_attr(feature = "frozen-abi", macro_use)] +#[cfg(feature = "frozen-abi")] +extern crate solana_frozen_abi_macro; diff --git a/votor-messages/src/vote.rs b/votor-messages/src/vote.rs new file mode 100644 index 00000000000000..7aaedc501a8f00 --- /dev/null +++ b/votor-messages/src/vote.rs @@ -0,0 +1,263 @@ +//! 
Vote data types for use by clients +use { + serde::{Deserialize, Serialize}, + solana_clock::Slot, + solana_hash::Hash, +}; + +/// Enum that clients can use to parse and create the vote +/// structures expected by the program +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample, AbiEnumVisitor), + frozen_abi(digest = "FRn4f3PTtbvw3uv2r3qF8K49a5UF4QqDuVdyeshtipTW") +)] +#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] +pub enum Vote { + /// A notarization vote + Notarize(NotarizationVote), + /// A finalization vote + Finalize(FinalizationVote), + /// A skip vote + Skip(SkipVote), + /// A notarization fallback vote + NotarizeFallback(NotarizationFallbackVote), + /// A skip fallback vote + SkipFallback(SkipFallbackVote), +} + +impl Vote { + /// Create a new notarization vote + pub fn new_notarization_vote(slot: Slot, block_id: Hash) -> Self { + Self::from(NotarizationVote::new(slot, block_id)) + } + + /// Create a new finalization vote + pub fn new_finalization_vote(slot: Slot) -> Self { + Self::from(FinalizationVote::new(slot)) + } + + /// Create a new skip vote + pub fn new_skip_vote(slot: Slot) -> Self { + Self::from(SkipVote::new(slot)) + } + + /// Create a new notarization fallback vote + pub fn new_notarization_fallback_vote(slot: Slot, block_id: Hash) -> Self { + Self::from(NotarizationFallbackVote::new(slot, block_id)) + } + + /// Create a new skip fallback vote + pub fn new_skip_fallback_vote(slot: Slot) -> Self { + Self::from(SkipFallbackVote::new(slot)) + } + + /// The slot which was voted for + pub fn slot(&self) -> Slot { + match self { + Self::Notarize(vote) => vote.slot(), + Self::Finalize(vote) => vote.slot(), + Self::Skip(vote) => vote.slot(), + Self::NotarizeFallback(vote) => vote.slot(), + Self::SkipFallback(vote) => vote.slot(), + } + } + + /// The block id associated with the block which was voted for + pub fn block_id(&self) -> Option<&Hash> { + match self { + Self::Notarize(vote) => Some(vote.block_id()), + 
Self::NotarizeFallback(vote) => Some(vote.block_id()), + Self::Finalize(_) | Self::Skip(_) | Self::SkipFallback(_) => None, + } + } + + /// Whether the vote is a notarization vote + pub fn is_notarization(&self) -> bool { + matches!(self, Self::Notarize(_)) + } + + /// Whether the vote is a finalization vote + pub fn is_finalize(&self) -> bool { + matches!(self, Self::Finalize(_)) + } + + /// Whether the vote is a skip vote + pub fn is_skip(&self) -> bool { + matches!(self, Self::Skip(_)) + } + + /// Whether the vote is a notarization fallback vote + pub fn is_notarize_fallback(&self) -> bool { + matches!(self, Self::NotarizeFallback(_)) + } + + /// Whether the vote is a skip fallback vote + pub fn is_skip_fallback(&self) -> bool { + matches!(self, Self::SkipFallback(_)) + } + + /// Whether the vote is a notarization or finalization + pub fn is_notarization_or_finalization(&self) -> bool { + matches!(self, Self::Notarize(_) | Self::Finalize(_)) + } +} + +impl From for Vote { + fn from(vote: NotarizationVote) -> Self { + Self::Notarize(vote) + } +} + +impl From for Vote { + fn from(vote: FinalizationVote) -> Self { + Self::Finalize(vote) + } +} + +impl From for Vote { + fn from(vote: SkipVote) -> Self { + Self::Skip(vote) + } +} + +impl From for Vote { + fn from(vote: NotarizationFallbackVote) -> Self { + Self::NotarizeFallback(vote) + } +} + +impl From for Vote { + fn from(vote: SkipFallbackVote) -> Self { + Self::SkipFallback(vote) + } +} + +/// A notarization vote +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "5AdwChAjsj5QUXLdpDnGGK2L2nA8y8EajVXi6jsmTv1m") +)] +#[derive(Clone, Copy, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct NotarizationVote { + slot: Slot, + block_id: Hash, +} + +impl NotarizationVote { + /// Construct a notarization vote for `slot` + pub fn new(slot: Slot, block_id: Hash) -> Self { + Self { slot, block_id } + } + + /// The slot to notarize + pub fn slot(&self) -> Slot { + self.slot + } + 
+ /// The block_id of the notarization slot + pub fn block_id(&self) -> &Hash { + &self.block_id + } +} + +/// A finalization vote +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "2XQ5N6YLJjF28w7cMFFUQ9SDgKuf9JpJNtAiXSPA8vR2") +)] +#[derive(Clone, Copy, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct FinalizationVote { + slot: Slot, +} + +impl FinalizationVote { + /// Construct a finalization vote for `slot` + pub fn new(slot: Slot) -> Self { + Self { slot } + } + + /// The slot to finalize + pub fn slot(&self) -> Slot { + self.slot + } +} + +/// A skip vote +/// Represents a range of slots to skip +/// inclusive on both ends +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "G8Nrx3sMYdnLpHsCNark3BGA58BmW2sqNnqjkYhQHtN") +)] +#[derive(Clone, Copy, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct SkipVote { + pub(crate) slot: Slot, +} + +impl SkipVote { + /// Construct a skip vote for `slot` + pub fn new(slot: Slot) -> Self { + Self { slot } + } + + /// The slot to skip + pub fn slot(&self) -> Slot { + self.slot + } +} + +/// A notarization fallback vote +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "7j5ZPwwyz1FaG3fpyQv5PVnQXicdSmqSk8NvqzkG1Eqz") +)] +#[derive(Clone, Copy, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct NotarizationFallbackVote { + slot: Slot, + block_id: Hash, +} + +impl NotarizationFallbackVote { + /// Construct a notarization vote for `slot` + pub fn new(slot: Slot, block_id: Hash) -> Self { + Self { slot, block_id } + } + + /// The slot to notarize + pub fn slot(&self) -> Slot { + self.slot + } + + /// The block_id of the notarization slot + pub fn block_id(&self) -> &Hash { + &self.block_id + } +} + +/// A skip fallback vote +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "WsUNum8V62gjRU1yAnPuBMAQui4YvMwD1RwrzHeYkeF") +)] +#[derive(Clone, Copy, Debug, 
PartialEq, Default, Serialize, Deserialize)] +pub struct SkipFallbackVote { + pub(crate) slot: Slot, +} + +impl SkipFallbackVote { + /// Construct a skip fallback vote for `slot` + pub fn new(slot: Slot) -> Self { + Self { slot } + } + + /// The slot to skip + pub fn slot(&self) -> Slot { + self.slot + } +} diff --git a/votor/Cargo.toml b/votor/Cargo.toml new file mode 100644 index 00000000000000..b4b1ed38785585 --- /dev/null +++ b/votor/Cargo.toml @@ -0,0 +1,75 @@ +[package] +name = "agave-votor" +documentation = "https://docs.rs/agave-votor" +readme = "../README.md" +version = { workspace = true } +authors = { workspace = true } +description = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[features] +agave-unstable-api = [] +dev-context-only-utils = ["solana-runtime/dev-context-only-utils"] +frozen-abi = [ + "dep:solana-frozen-abi", + "dep:solana-frozen-abi-macro", + "solana-accounts-db/frozen-abi", + "solana-bloom/frozen-abi", + "solana-ledger/frozen-abi", + "solana-runtime/frozen-abi", +] + +[dependencies] +anyhow = { workspace = true } +bincode = { workspace = true } +bitvec = { workspace = true } +bs58 = { workspace = true } +crossbeam-channel = { workspace = true } +dashmap = { workspace = true, features = ["rayon", "raw-api"] } +itertools = { workspace = true } +log = { workspace = true } +parking_lot = { workspace = true } +qualifier_attr = { workspace = true } +rayon = { workspace = true } +serde = { workspace = true } +serde_bytes = { workspace = true } +serde_derive = { workspace = true } +solana-accounts-db = { workspace = true } +solana-bloom = { workspace = true } +solana-bls-signatures = { workspace = true } +solana-clock = { workspace = true } +solana-entry = { workspace = true } +solana-epoch-schedule = { workspace = true } +solana-frozen-abi = { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-frozen-abi-macro 
= { workspace = true, optional = true, features = [ + "frozen-abi", +] } +solana-gossip = { workspace = true } +solana-hash = { workspace = true } +solana-keypair = { workspace = true } +solana-ledger = { workspace = true } +solana-logger = { workspace = true } +solana-measure = { workspace = true } +solana-metrics = { workspace = true } +solana-pubkey = { workspace = true } +solana-rpc = { workspace = true } +solana-runtime = { workspace = true } +solana-signature = { workspace = true } +solana-signer = { workspace = true } +solana-signer-store = { workspace = true } +solana-time-utils = { workspace = true } +solana-transaction = { workspace = true } +solana-votor-messages = { workspace = true } +thiserror = { workspace = true } + +[dev-dependencies] +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } +test-case = { workspace = true } + +[lints] +workspace = true diff --git a/votor/src/common.rs b/votor/src/common.rs new file mode 100644 index 00000000000000..b936437f690be0 --- /dev/null +++ b/votor/src/common.rs @@ -0,0 +1,127 @@ +use { + solana_votor_messages::{consensus_message::Certificate, vote::Vote}, + std::time::Duration, +}; + +// Core consensus types and constants +pub type Stake = u64; + +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum VoteType { + Finalize, + Notarize, + NotarizeFallback, + Skip, + SkipFallback, +} + +impl VoteType { + pub fn get_type(vote: &Vote) -> VoteType { + match vote { + Vote::Notarize(_) => VoteType::Notarize, + Vote::NotarizeFallback(_) => VoteType::NotarizeFallback, + Vote::Skip(_) => VoteType::Skip, + Vote::SkipFallback(_) => VoteType::SkipFallback, + Vote::Finalize(_) => VoteType::Finalize, + } + } + + #[allow(dead_code)] + pub fn is_notarize_type(&self) -> bool { + matches!(self, Self::Notarize | Self::NotarizeFallback) + } +} + +pub const fn conflicting_types(vote_type: VoteType) -> &'static [VoteType] { + match vote_type { + VoteType::Finalize => 
&[VoteType::NotarizeFallback, VoteType::Skip], + VoteType::Notarize => &[VoteType::Skip, VoteType::NotarizeFallback], + VoteType::NotarizeFallback => &[VoteType::Finalize, VoteType::Notarize], + VoteType::Skip => &[ + VoteType::Finalize, + VoteType::Notarize, + VoteType::SkipFallback, + ], + VoteType::SkipFallback => &[VoteType::Skip], + } +} + +/// Lookup from `CertificateId` to the `VoteType`s that contribute, +/// as well as the stake fraction required for certificate completion. +/// +/// Must be in sync with `vote_to_certificate_ids` +pub const fn certificate_limits_and_vote_types( + cert_type: Certificate, +) -> (f64, &'static [VoteType]) { + match cert_type { + Certificate::Notarize(_, _) => (0.6, &[VoteType::Notarize]), + Certificate::NotarizeFallback(_, _) => { + (0.6, &[VoteType::Notarize, VoteType::NotarizeFallback]) + } + Certificate::FinalizeFast(_, _) => (0.8, &[VoteType::Notarize]), + Certificate::Finalize(_) => (0.6, &[VoteType::Finalize]), + Certificate::Skip(_) => (0.6, &[VoteType::Skip, VoteType::SkipFallback]), + } +} + +/// Lookup from `Vote` to the `CertificateId`s the vote accounts for +/// +/// Must be in sync with `certificate_limits_and_vote_types` and `VoteType::get_type` +pub fn vote_to_certificate_ids(vote: &Vote) -> Vec { + match vote { + Vote::Notarize(vote) => vec![ + Certificate::Notarize(vote.slot(), *vote.block_id()), + Certificate::NotarizeFallback(vote.slot(), *vote.block_id()), + Certificate::FinalizeFast(vote.slot(), *vote.block_id()), + ], + Vote::NotarizeFallback(vote) => { + vec![Certificate::NotarizeFallback(vote.slot(), *vote.block_id())] + } + Vote::Finalize(vote) => vec![Certificate::Finalize(vote.slot())], + Vote::Skip(vote) => vec![Certificate::Skip(vote.slot())], + Vote::SkipFallback(vote) => vec![Certificate::Skip(vote.slot())], + } +} + +pub const MAX_ENTRIES_PER_PUBKEY_FOR_OTHER_TYPES: usize = 1; +pub const MAX_ENTRIES_PER_PUBKEY_FOR_NOTARIZE_LITE: usize = 3; + +pub const SAFE_TO_NOTAR_MIN_NOTARIZE_ONLY: f64 = 
0.4; +pub const SAFE_TO_NOTAR_MIN_NOTARIZE_FOR_NOTARIZE_OR_SKIP: f64 = 0.2; +pub const SAFE_TO_NOTAR_MIN_NOTARIZE_AND_SKIP: f64 = 0.6; + +pub const SAFE_TO_SKIP_THRESHOLD: f64 = 0.4; + +/// Time bound assumed on network transmission delays during periods of synchrony. +const DELTA: Duration = Duration::from_millis(250); + +/// Time the leader has for producing and sending the block. +const DELTA_BLOCK: Duration = Duration::from_millis(400); + +/// Base timeout for when leader's first slice should arrive if they sent it immediately. +const DELTA_TIMEOUT: Duration = DELTA.checked_mul(3).unwrap(); + +#[allow(dead_code)] +/// TODO(wen): remove allow(dead_code) when timer is fully integrated +/// Timeout for standstill detection mechanism. +const DELTA_STANDSTILL: Duration = Duration::from_millis(10_000); + +/// Returns the Duration for when the `SkipTimer` should be set for for the given slot in the leader window. +#[inline] +pub fn skip_timeout(leader_block_index: usize) -> Duration { + DELTA_TIMEOUT + .saturating_add( + DELTA_BLOCK + .saturating_mul(leader_block_index as u32) + .saturating_add(DELTA_TIMEOUT), + ) + .saturating_add(DELTA) +} + +/// Block timeout, when we should publish the final shred for the leader block index +/// within the leader window +#[inline] +pub fn block_timeout(leader_block_index: usize) -> Duration { + // TODO: based on testing, perhaps adjust this + DELTA_BLOCK.saturating_mul((leader_block_index as u32).saturating_add(1)) +} diff --git a/votor/src/consensus_pool.rs b/votor/src/consensus_pool.rs new file mode 100644 index 00000000000000..a1035ade74dc5b --- /dev/null +++ b/votor/src/consensus_pool.rs @@ -0,0 +1,5 @@ +pub mod parent_ready_tracker; +pub mod slot_stake_counters; +mod stats; +mod vote_certificate_builder; +mod vote_pool; diff --git a/votor/src/consensus_pool/parent_ready_tracker.rs b/votor/src/consensus_pool/parent_ready_tracker.rs new file mode 100644 index 00000000000000..36709f4be44386 --- /dev/null +++ 
b/votor/src/consensus_pool/parent_ready_tracker.rs @@ -0,0 +1,410 @@ +//! Tracks the parent-ready condition +//! +//! The parent-ready condition pertains to a slot `s` and a block hash `hash(b)`, +//! where `s` is the first slot of a leader window and `s > slot(b)`. +//! Specifically, it is defined as the following: +//! - Block `b` is notarized or notarized-fallback, and +//! - slots `slot(b) + 1` (inclusive) to `s` (non-inclusive) are skip-certified. +//! +//! Additional restriction on notarization votes ensure that the parent-ready +//! condition holds for a block `b` only if it also holds for all ancestors of `b`. +//! Together this ensures that the block `b` is a valid parent for block +//! production, i.e., under good network conditions an honest leader proposing +//! a block with parent `b` in slot `s` will have their block finalized. + +use { + crate::{common::MAX_ENTRIES_PER_PUBKEY_FOR_NOTARIZE_LITE, event::VotorEvent}, + solana_clock::{Slot, NUM_CONSECUTIVE_LEADER_SLOTS}, + solana_pubkey::Pubkey, + solana_votor_messages::consensus_message::Block, + std::collections::HashMap, +}; + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum BlockProductionParent { + MissedWindow, + ParentNotReady, + Parent(Block), +} + +#[derive(Clone, Debug, Default)] +pub struct ParentReadyTracker { + /// Our pubkey for logging + my_pubkey: Pubkey, + + /// Parent ready status for each slot + slot_statuses: HashMap, + + /// Root + root: Slot, + + /// Highest slot with parent ready status + // TODO: While the voting loop is sequential we track every slot (not just the first in window) + // However once we handle all slots concurrently we will update this to only count first leader + // slot in window + highest_with_parent_ready: Slot, +} + +#[derive(Clone, Default, Debug)] +struct ParentReadyStatus { + /// Whether this slot has a skip certificate + skip: bool, + /// The blocks that have been notar fallbacked in this slot + notar_fallbacks: Vec, + /// The parent blocks that 
achieve parent ready in this slot, + /// Theses blocks are all potential parents choosable in this slot + parents_ready: Vec, +} + +impl ParentReadyTracker { + /// Creates a new tracker with the root bank as implicitely notarized fallback + pub fn new(my_pubkey: Pubkey, root_block @ (root_slot, _): Block) -> Self { + let mut slot_statuses = HashMap::new(); + slot_statuses.insert( + root_slot, + ParentReadyStatus { + skip: false, + notar_fallbacks: vec![root_block], + parents_ready: vec![], + }, + ); + slot_statuses.insert( + root_slot.saturating_add(1), + ParentReadyStatus { + skip: false, + notar_fallbacks: vec![], + parents_ready: vec![root_block], + }, + ); + Self { + my_pubkey, + slot_statuses, + root: root_slot, + highest_with_parent_ready: root_slot.saturating_add(1), + } + } + + /// Adds a new notarize fallback certificate, we can use Notarize/NotarizeFallback/FastFinalize + pub fn add_new_notar_fallback_or_stronger( + &mut self, + block @ (slot, _): Block, + events: &mut Vec, + ) { + if slot <= self.root { + return; + } + + let status = self.slot_statuses.entry(slot).or_default(); + if status.notar_fallbacks.contains(&block) { + return; + } + trace!( + "{}: Adding new notar fallback for {block:?}", + self.my_pubkey + ); + status.notar_fallbacks.push(block); + assert!(status.notar_fallbacks.len() <= MAX_ENTRIES_PER_PUBKEY_FOR_NOTARIZE_LITE); + + // Add this block as valid parent to skip connected future blocks + for s in slot.saturating_add(1).. 
{ + trace!( + "{}: Adding new parent ready for {s} parent {block:?}", + self.my_pubkey + ); + let status = self.slot_statuses.entry(s).or_default(); + if !status.parents_ready.contains(&block) { + status.parents_ready.push(block); + + // Only notify for parent ready on first leader slots + if s % NUM_CONSECUTIVE_LEADER_SLOTS == 0 { + events.push(VotorEvent::ParentReady { + slot: s, + parent_block: block, + }); + } + + self.highest_with_parent_ready = s.max(self.highest_with_parent_ready); + } + + if !status.skip { + break; + } + } + } + + /// Adds a new skip certificate + pub fn add_new_skip(&mut self, slot: Slot, events: &mut Vec) { + if slot <= self.root { + return; + } + + trace!("{}: Adding new skip for {slot:?}", self.my_pubkey); + let status = self.slot_statuses.entry(slot).or_default(); + status.skip = true; + + // Get newly connected future slots + let mut future_slots = vec![]; + for s in slot.saturating_add(1).. { + future_slots.push(s); + if !self.slot_statuses.get(&s).is_some_and(|ss| ss.skip) { + break; + } + } + + // Find possible parents using the previous slot + let mut potential_parents = vec![]; + let Some(status) = self.slot_statuses.get(&(slot.saturating_sub(1))) else { + return; + }; + for nf in &status.notar_fallbacks { + // If there's a notarize fallback certificate we can use the previous slot + // as a parent + potential_parents.push(*nf); + } + if status.skip { + // If there's a skip certificate we can use the parents of the previous slot + // as a parent + for parent in &status.parents_ready { + potential_parents.push(*parent); + } + } + + if potential_parents.is_empty() { + return; + } + + // Add these as valid parents to the future slots + for s in future_slots { + trace!( + "{}: Adding new parent ready for {s} parents {potential_parents:?}", + self.my_pubkey, + ); + let status = self.slot_statuses.entry(s).or_default(); + for &block in &potential_parents { + if status.parents_ready.contains(&block) { + // We already have this parent 
ready + continue; + } + status.parents_ready.push(block); + // Only notify for parent ready on first leader slots + if s % NUM_CONSECUTIVE_LEADER_SLOTS == 0 { + events.push(VotorEvent::ParentReady { + slot: s, + parent_block: block, + }); + } + } + + self.highest_with_parent_ready = s.max(self.highest_with_parent_ready); + } + } + + pub fn parent_ready(&self, slot: Slot, parent: Block) -> bool { + self.slot_statuses + .get(&slot) + .is_some_and(|ss| ss.parents_ready.contains(&parent)) + } + + /// For our leader slot `slot`, which block should we use as the parent + pub fn block_production_parent(&self, slot: Slot) -> BlockProductionParent { + if self.highest_parent_ready() > slot { + // This indicates that our block has already received a certificate + // either because we were too slow, or because we are restarting + // and catching up. Either way we should not attempt to produce this slot + return BlockProductionParent::MissedWindow; + } + match self + .slot_statuses + .get(&slot) + .and_then(|ss| ss.parents_ready.iter().min().copied()) + { + Some(parent) => BlockProductionParent::Parent(parent), + // TODO: this will be plugged in for optimistic block production + None => BlockProductionParent::ParentNotReady, + } + } + + pub fn highest_parent_ready(&self) -> Slot { + self.highest_with_parent_ready + } + + pub fn set_root(&mut self, root: Slot) { + self.root = root; + self.slot_statuses.retain(|&s, _| s >= root); + } + + /// Updates the pubkey. Note that the pubkey is used for logging purposes only. 
+ pub fn update_pubkey(&mut self, new_pubkey: Pubkey) { + self.my_pubkey = new_pubkey; + } +} + +#[cfg(test)] +mod tests { + use { + super::*, itertools::Itertools, solana_clock::NUM_CONSECUTIVE_LEADER_SLOTS, + solana_hash::Hash, solana_pubkey::Pubkey, + }; + + #[test] + fn basic() { + let genesis = Block::default(); + let mut tracker = ParentReadyTracker::new(Pubkey::default(), genesis); + let mut events = vec![]; + + for i in 1..2 * NUM_CONSECUTIVE_LEADER_SLOTS { + let block = (i, Hash::new_unique()); + tracker.add_new_notar_fallback_or_stronger(block, &mut events); + assert_eq!(tracker.highest_parent_ready(), i + 1); + assert!(tracker.parent_ready(i + 1, block)); + } + } + + #[test] + fn skips() { + let genesis = Block::default(); + let mut tracker = ParentReadyTracker::new(Pubkey::default(), genesis); + let mut events = vec![]; + let block = (1, Hash::new_unique()); + + tracker.add_new_notar_fallback_or_stronger(block, &mut events); + tracker.add_new_skip(1, &mut events); + tracker.add_new_skip(2, &mut events); + tracker.add_new_skip(3, &mut events); + + assert!(tracker.parent_ready(4, block)); + assert!(tracker.parent_ready(4, genesis)); + assert_eq!(tracker.highest_parent_ready(), 4); + } + + #[test] + fn out_of_order() { + let genesis = Block::default(); + let mut tracker = ParentReadyTracker::new(Pubkey::default(), genesis); + let mut events = vec![]; + let block = (1, Hash::new_unique()); + + tracker.add_new_skip(3, &mut events); + tracker.add_new_skip(2, &mut events); + + tracker.add_new_notar_fallback_or_stronger(block, &mut events); + assert!(tracker.parent_ready(4, block)); + assert!(!tracker.parent_ready(4, genesis)); + + tracker.add_new_skip(1, &mut events); + assert!(tracker.parent_ready(4, block)); + assert!(tracker.parent_ready(4, genesis)); + } + + #[test] + fn snapshot_wfsm() { + let root_slot = 2147; + let root_block = (root_slot, Hash::new_unique()); + let mut tracker = ParentReadyTracker::new(Pubkey::default(), root_block); + let mut events = 
vec![]; + + assert!(tracker.parent_ready(root_slot + 1, root_block)); + assert_eq!(tracker.highest_parent_ready(), root_slot + 1); + + // Skipping root slot shouldn't do anything + tracker.add_new_skip(root_slot, &mut events); + assert!(tracker.parent_ready(root_slot + 1, root_block)); + assert_eq!(tracker.highest_parent_ready(), root_slot + 1); + + // Adding new certs should work as root slot is implicitely notarized fallback + tracker.add_new_skip(root_slot + 1, &mut events); + tracker.add_new_skip(root_slot + 2, &mut events); + assert!(tracker.parent_ready(root_slot + 3, root_block)); + assert_eq!(tracker.highest_parent_ready(), root_slot + 3); + + let block = (root_slot + 4, Hash::new_unique()); + tracker.add_new_notar_fallback_or_stronger(block, &mut events); + assert!(tracker.parent_ready(root_slot + 3, root_block)); + assert!(tracker.parent_ready(root_slot + 5, block)); + assert_eq!(tracker.highest_parent_ready(), root_slot + 5); + } + + #[test] + fn highest_parent_ready_out_of_order() { + let genesis = Block::default(); + let mut tracker = ParentReadyTracker::new(Pubkey::default(), genesis); + let mut events = vec![]; + assert_eq!(tracker.highest_parent_ready(), 1); + + tracker.add_new_skip(2, &mut events); + assert_eq!(tracker.highest_parent_ready(), 1); + + tracker.add_new_skip(3, &mut events); + assert_eq!(tracker.highest_parent_ready(), 1); + + tracker.add_new_skip(1, &mut events); + assert!(tracker.parent_ready(4, genesis)); + assert_eq!(tracker.highest_parent_ready(), 4); + assert_eq!( + tracker.block_production_parent(4), + BlockProductionParent::Parent(genesis) + ); + } + + #[test] + fn missed_window() { + let genesis = Block::default(); + let mut tracker = ParentReadyTracker::new(Pubkey::default(), genesis); + let mut events = vec![]; + assert_eq!(tracker.highest_parent_ready(), 1); + assert_eq!( + tracker.block_production_parent(4), + BlockProductionParent::ParentNotReady + ); + + tracker.add_new_notar_fallback_or_stronger((4, Hash::new_unique()), 
&mut events); + assert_eq!(tracker.highest_parent_ready(), 5); + assert_eq!( + tracker.block_production_parent(4), + BlockProductionParent::MissedWindow + ); + + assert_eq!( + tracker.block_production_parent(8), + BlockProductionParent::ParentNotReady + ); + tracker.add_new_notar_fallback_or_stronger((64, Hash::new_unique()), &mut events); + assert_eq!(tracker.highest_parent_ready(), 65); + assert_eq!( + tracker.block_production_parent(8), + BlockProductionParent::MissedWindow + ); + } + + #[test] + fn pick_more_skips() { + let genesis = Block::default(); + let mut tracker = ParentReadyTracker::new(Pubkey::default(), genesis); + let mut events = vec![]; + + for i in 1..=10 { + tracker.add_new_skip(i, &mut vec![]); + tracker.add_new_notar_fallback_or_stronger((i, Hash::new_unique()), &mut vec![]); + } + + tracker.add_new_skip(11, &mut events); + + assert_eq!(12, tracker.highest_parent_ready(),); + let parent_readys: Vec = events + .into_iter() + .map(|event| match event { + VotorEvent::ParentReady { slot, parent_block } => { + assert!(slot == 12); + parent_block.0 + } + _ => panic!("Invalid event"), + }) + .sorted() + .collect(); + assert_eq!(parent_readys, (0..=10).collect::>()); + assert_eq!( + tracker.block_production_parent(12), + BlockProductionParent::Parent(genesis) + ); + } +} diff --git a/votor/src/consensus_pool/slot_stake_counters.rs b/votor/src/consensus_pool/slot_stake_counters.rs new file mode 100644 index 00000000000000..075a3a52f02bf8 --- /dev/null +++ b/votor/src/consensus_pool/slot_stake_counters.rs @@ -0,0 +1,311 @@ +#![allow(dead_code)] +// TODO(wen): remove allow(dead_code) when consensus_pool is fully integrated + +use { + crate::{ + common::{ + Stake, SAFE_TO_NOTAR_MIN_NOTARIZE_AND_SKIP, + SAFE_TO_NOTAR_MIN_NOTARIZE_FOR_NOTARIZE_OR_SKIP, SAFE_TO_NOTAR_MIN_NOTARIZE_ONLY, + SAFE_TO_SKIP_THRESHOLD, + }, + consensus_pool::stats::ConsensusPoolStats, + event::VotorEvent, + }, + solana_hash::Hash, + solana_votor_messages::vote::Vote, + 
std::collections::BTreeMap, +}; + +#[derive(Debug, Default)] +pub(crate) struct SlotStakeCounters { + my_first_vote: Option, + total_stake: Stake, + skip_total: Stake, + notarize_total: Stake, + notarize_entry_total: BTreeMap, + top_notarized_stake: Stake, + safe_to_notar_sent: Vec, + safe_to_skip_sent: bool, +} + +impl SlotStakeCounters { + pub fn new(total_stake: Stake) -> Self { + Self { + total_stake, + ..Default::default() + } + } + + pub fn add_vote( + &mut self, + vote: &Vote, + entry_stake: Stake, + is_my_own_vote: bool, + events: &mut Vec, + stats: &mut ConsensusPoolStats, + ) { + match vote { + Vote::Skip(_) => self.skip_total = entry_stake, + Vote::Notarize(vote) => { + let old_entry_stake = self + .notarize_entry_total + .insert(*vote.block_id(), entry_stake) + .unwrap_or(0); + self.notarize_total = self + .notarize_total + .saturating_sub(old_entry_stake) + .saturating_add(entry_stake); + self.top_notarized_stake = self.top_notarized_stake.max(entry_stake); + } + _ => return, // Not interested in other vote types + } + if self.my_first_vote.is_none() && is_my_own_vote { + self.my_first_vote = Some(*vote); + } + if self.my_first_vote.is_none() { + // We have not voted yet, no need to check safe to notarize or skip + return; + } + let slot = vote.slot(); + // Check safe to notar + for (block_id, stake) in &self.notarize_entry_total { + if !self.safe_to_notar_sent.contains(block_id) && self.is_safe_to_notar(block_id, stake) + { + events.push(VotorEvent::SafeToNotar((slot, *block_id))); + stats.event_safe_to_notarize = stats.event_safe_to_notarize.saturating_add(1); + self.safe_to_notar_sent.push(*block_id); + } + } + // Check safe to skip + if !self.safe_to_skip_sent && self.is_safe_to_skip() { + events.push(VotorEvent::SafeToSkip(slot)); + self.safe_to_skip_sent = true; + stats.event_safe_to_skip = stats.event_safe_to_skip.saturating_add(1); + } + } + + fn is_safe_to_notar(&self, block_id: &Hash, stake: &Stake) -> bool { + // White paper v1.1 page 22: 
The event is only issued if the node voted in slot s already, + // but not to notarize b. Moreover: + // notar(b) >= 40% or (skip(s) + notar(b) >= 60% and notar(b) >= 20%) + if let Some(Vote::Notarize(my_vote)) = self.my_first_vote.as_ref() { + if my_vote.block_id() == block_id { + return false; // I voted for the same block, no need to send NotarizeFallback + } + } + let skip_ratio = self.skip_total as f64 / self.total_stake as f64; + let notarized_ratio = *stake as f64 / self.total_stake as f64; + trace!("safe_to_notar {block_id:?} {skip_ratio} {notarized_ratio}"); + // Check if the block fits condition (i) 40% of stake holders voted notarize + notarized_ratio >= SAFE_TO_NOTAR_MIN_NOTARIZE_ONLY + // Check if the block fits condition (ii) 20% notarized, and 60% notarized or skip + || (notarized_ratio >= SAFE_TO_NOTAR_MIN_NOTARIZE_FOR_NOTARIZE_OR_SKIP + && notarized_ratio + skip_ratio >= SAFE_TO_NOTAR_MIN_NOTARIZE_AND_SKIP) + } + + fn is_safe_to_skip(&self) -> bool { + // White paper v1.1 page 22: The event is only issued if the node voted in slot s already, + // but not to skip s. 
Moreover: + // skip(s) + Sum of all notarize - (max in notarize(b)) >= 40% + if let Some(Vote::Notarize(_)) = self.my_first_vote.as_ref() { + trace!( + "safe_to_skip {} {:?} {} {} {}", + self.my_first_vote.unwrap().slot(), + self.my_first_vote.unwrap().block_id(), + self.skip_total, + self.notarize_total, + self.top_notarized_stake + ); + self.skip_total + .saturating_add(self.notarize_total.saturating_sub(self.top_notarized_stake)) + as f64 + / self.total_stake as f64 + >= SAFE_TO_SKIP_THRESHOLD + } else { + false + } + } +} + +#[cfg(test)] +mod tests { + use {super::*, solana_votor_messages::vote::Vote}; + + #[test] + fn test_safe_to_notar() { + let mut counters = SlotStakeCounters::new(100); + + let mut events = vec![]; + let mut stats = ConsensusPoolStats::default(); + let slot = 2; + // I voted for skip + counters.add_vote( + &Vote::new_skip_vote(slot), + 10, + true, + &mut events, + &mut stats, + ); + assert!(events.is_empty()); + assert_eq!(stats.event_safe_to_notarize, 0); + + // 40% of stake holders voted notarize + counters.add_vote( + &Vote::new_notarization_vote(slot, Hash::default()), + 40, + false, + &mut events, + &mut stats, + ); + assert_eq!(events.len(), 1); + assert!( + matches!(events[0], VotorEvent::SafeToNotar((s, block_id)) if s == slot && block_id == Hash::default()) + ); + assert_eq!(stats.event_safe_to_notarize, 1); + events.clear(); + + // Adding more notarizations does not trigger more events + counters.add_vote( + &Vote::new_notarization_vote(slot, Hash::default()), + 20, + false, + &mut events, + &mut stats, + ); + assert!(events.is_empty()); + assert_eq!(stats.event_safe_to_notarize, 1); + + // Reset counters + counters = SlotStakeCounters::new(100); + events.clear(); + stats = ConsensusPoolStats::default(); + + // I voted for notarize b + let hash_1 = Hash::new_unique(); + counters.add_vote( + &Vote::new_notarization_vote(slot, hash_1), + 1, + true, + &mut events, + &mut stats, + ); + assert!(events.is_empty()); + 
assert_eq!(stats.event_safe_to_notarize, 0); + + // 25% of stake holders voted notarize b' + let hash_2 = Hash::new_unique(); + counters.add_vote( + &Vote::new_notarization_vote(slot, hash_2), + 25, + false, + &mut events, + &mut stats, + ); + assert!(events.is_empty()); + assert_eq!(stats.event_safe_to_notarize, 0); + + // 35% more of stake holders voted skip + counters.add_vote( + &Vote::new_skip_vote(slot), + 35, + false, + &mut events, + &mut stats, + ); + assert_eq!(events.len(), 1); + assert!( + matches!(events[0], VotorEvent::SafeToNotar((s, block_id)) if s == slot && block_id == hash_2) + ); + assert_eq!(stats.event_safe_to_notarize, 1); + } + + #[test] + fn test_safe_to_skip() { + let mut counters = SlotStakeCounters::new(100); + + let mut events = vec![]; + let mut stats = ConsensusPoolStats::default(); + let slot = 2; + // I voted for notarize b + counters.add_vote( + &Vote::new_notarization_vote(slot, Hash::default()), + 10, + true, + &mut events, + &mut stats, + ); + assert!(events.is_empty()); + assert_eq!(stats.event_safe_to_skip, 0); + + // 40% of stake holders voted skip + counters.add_vote( + &Vote::new_skip_vote(slot), + 40, + false, + &mut events, + &mut stats, + ); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], VotorEvent::SafeToSkip(s) if s == slot)); + assert_eq!(stats.event_safe_to_skip, 1); + events.clear(); + + // Adding more skips does not trigger more events + counters.add_vote( + &Vote::new_skip_vote(slot), + 20, + false, + &mut events, + &mut stats, + ); + assert!(events.is_empty()); + assert_eq!(stats.event_safe_to_skip, 1); + + // Reset counters + counters = SlotStakeCounters::new(100); + events.clear(); + stats = ConsensusPoolStats::default(); + + // I voted for notarize b, 10% of stake holders voted with me + let hash_1 = Hash::new_unique(); + counters.add_vote( + &Vote::new_notarization_vote(slot, hash_1), + 10, + true, + &mut events, + &mut stats, + ); + // 20% of stake holders voted a different notarization b' + 
let hash_2 = Hash::new_unique(); + counters.add_vote( + &Vote::new_notarization_vote(slot, hash_2), + 20, + false, + &mut events, + &mut stats, + ); + // 30% of stake holders voted skip + counters.add_vote( + &Vote::new_skip_vote(slot), + 30, + false, + &mut events, + &mut stats, + ); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], VotorEvent::SafeToSkip(s) if s == slot)); + assert_eq!(stats.event_safe_to_skip, 1); + events.clear(); + + // Adding more notarization on b does not trigger more events + counters.add_vote( + &Vote::new_notarization_vote(slot, hash_1), + 10, + false, + &mut events, + &mut stats, + ); + assert!(events.is_empty()); + assert_eq!(stats.event_safe_to_skip, 1); + } +} diff --git a/votor/src/consensus_pool/stats.rs b/votor/src/consensus_pool/stats.rs new file mode 100644 index 00000000000000..1005978b1bbf03 --- /dev/null +++ b/votor/src/consensus_pool/stats.rs @@ -0,0 +1,232 @@ +#![allow(dead_code)] +// TODO(wen): remove allow(dead_code) when consensus_pool is fully integrated + +use { + crate::common::VoteType, + solana_metrics::datapoint_info, + solana_votor_messages::consensus_message::CertificateType, + std::time::{Duration, Instant}, +}; + +const STATS_REPORT_INTERVAL: Duration = Duration::from_secs(10); + +#[derive(Debug)] +pub(crate) struct ConsensusPoolStats { + pub(crate) conflicting_votes: u32, + pub(crate) event_safe_to_notarize: u32, + pub(crate) event_safe_to_skip: u32, + pub(crate) exist_certs: u32, + pub(crate) exist_votes: u32, + pub(crate) incoming_certs: u32, + pub(crate) incoming_votes: u32, + pub(crate) out_of_range_certs: u32, + pub(crate) out_of_range_votes: u32, + + pub(crate) new_certs_generated: Vec, + pub(crate) new_certs_ingested: Vec, + pub(crate) ingested_votes: Vec, + + pub(crate) last_request_time: Instant, +} + +impl Default for ConsensusPoolStats { + fn default() -> Self { + Self::new() + } +} + +impl ConsensusPoolStats { + pub fn new() -> Self { + let num_vote_types = (VoteType::SkipFallback as 
usize).saturating_add(1); + let num_cert_types = (CertificateType::Skip as usize).saturating_add(1); + Self { + conflicting_votes: 0, + event_safe_to_notarize: 0, + event_safe_to_skip: 0, + exist_certs: 0, + exist_votes: 0, + incoming_certs: 0, + incoming_votes: 0, + out_of_range_certs: 0, + out_of_range_votes: 0, + + new_certs_ingested: vec![0; num_cert_types], + new_certs_generated: vec![0; num_cert_types], + ingested_votes: vec![0; num_vote_types], + + last_request_time: Instant::now(), + } + } + + pub fn incr_ingested_vote_type(&mut self, vote_type: VoteType) { + let index = vote_type as usize; + + self.ingested_votes[index] = self.ingested_votes[index].saturating_add(1); + } + + pub fn incr_cert_type(&mut self, cert_type: CertificateType, is_generated: bool) { + let index = cert_type as usize; + let array = if is_generated { + &mut self.new_certs_generated + } else { + &mut self.new_certs_ingested + }; + + array[index] = array[index].saturating_add(1); + } + + fn report(&self) { + datapoint_info!( + "consensus_pool_stats", + ("conflicting_votes", self.conflicting_votes as i64, i64), + ("event_safe_to_skip", self.event_safe_to_skip as i64, i64), + ( + "event_safe_to_notarize", + self.event_safe_to_notarize as i64, + i64 + ), + ("exist_votes", self.exist_votes as i64, i64), + ("exist_certs", self.exist_certs as i64, i64), + ("incoming_votes", self.incoming_votes as i64, i64), + ("incoming_certs", self.incoming_certs as i64, i64), + ("out_of_range_votes", self.out_of_range_votes as i64, i64), + ("out_of_range_certs", self.out_of_range_certs as i64, i64), + ); + + datapoint_info!( + "consensus_ingested_votes", + ( + "finalize", + *self + .ingested_votes + .get(VoteType::Finalize as usize) + .unwrap() as i64, + i64 + ), + ( + "notarize", + *self + .ingested_votes + .get(VoteType::Notarize as usize) + .unwrap() as i64, + i64 + ), + ( + "notarize_fallback", + *self + .ingested_votes + .get(VoteType::NotarizeFallback as usize) + .unwrap() as i64, + i64 + ), + ( + 
"skip", + *self.ingested_votes.get(VoteType::Skip as usize).unwrap() as i64, + i64 + ), + ( + "skip_fallback", + *self + .ingested_votes + .get(VoteType::SkipFallback as usize) + .unwrap() as i64, + i64 + ), + ); + + datapoint_info!( + "certificate_pool_ingested_certs", + ( + "finalize", + *self + .new_certs_ingested + .get(CertificateType::Finalize as usize) + .unwrap() as i64, + i64 + ), + ( + "finalize_fast", + *self + .new_certs_ingested + .get(CertificateType::FinalizeFast as usize) + .unwrap() as i64, + i64 + ), + ( + "notarize", + *self + .new_certs_ingested + .get(CertificateType::Notarize as usize) + .unwrap() as i64, + i64 + ), + ( + "notarize_fallback", + *self + .new_certs_ingested + .get(CertificateType::NotarizeFallback as usize) + .unwrap() as i64, + i64 + ), + ( + "skip", + *self + .new_certs_ingested + .get(CertificateType::Skip as usize) + .unwrap() as i64, + i64 + ), + ); + + datapoint_info!( + "consensus_pool_generated_certs", + ( + "finalize", + *self + .new_certs_generated + .get(CertificateType::Finalize as usize) + .unwrap() as i64, + i64 + ), + ( + "finalize_fast", + *self + .new_certs_generated + .get(CertificateType::FinalizeFast as usize) + .unwrap() as i64, + i64 + ), + ( + "notarize", + *self + .new_certs_generated + .get(CertificateType::Notarize as usize) + .unwrap() as i64, + i64 + ), + ( + "notarize_fallback", + *self + .new_certs_generated + .get(CertificateType::NotarizeFallback as usize) + .unwrap() as i64, + i64 + ), + ( + "skip", + *self + .new_certs_generated + .get(CertificateType::Skip as usize) + .unwrap() as i64, + i64 + ), + ); + } + + pub fn maybe_report(&mut self) { + if self.last_request_time.elapsed() >= STATS_REPORT_INTERVAL { + self.report(); + *self = Self::new(); + } + } +} diff --git a/votor/src/consensus_pool/vote_certificate_builder.rs b/votor/src/consensus_pool/vote_certificate_builder.rs new file mode 100644 index 00000000000000..373c240877960c --- /dev/null +++ 
b/votor/src/consensus_pool/vote_certificate_builder.rs @@ -0,0 +1,476 @@ +use { + crate::common::{certificate_limits_and_vote_types, VoteType}, + bitvec::prelude::*, + itertools::Itertools, + solana_bls_signatures::{BlsError, SignatureProjective}, + solana_signer_store::{decode, encode_base2, encode_base3, DecodeError, Decoded, EncodeError}, + solana_votor_messages::consensus_message::{Certificate, CertificateMessage, VoteMessage}, + thiserror::Error, +}; + +/// Maximum number of validators in a certificate +/// +/// There are around 1500 validators currently. For a clean power-of-two +/// implementation, we should choose either 2048 or 4096. Choose a more +/// conservative number 4096 for now. During build() we will cut off end +/// of the bitmaps if the tail contains only zeroes, so actual bitmap +/// length will be less than or equal to this number. +const MAXIMUM_VALIDATORS: usize = 4096; + +#[allow(dead_code)] +#[derive(Debug, Error, PartialEq)] +pub enum CertificateError { + #[error("BLS error: {0}")] + BlsError(#[from] BlsError), + #[error("solana-signer-store decode error: {0:?}")] + DecodeError(DecodeError), + #[error("solana-signer-store encode error: {0:?}")] + EncodeError(EncodeError), + #[error("Validator does not exist for given rank: {0}")] + ValidatorDoesNotExist(u16), +} + +// TODO(wen): remove dead_code when we migrate consensus_pool +#[allow(dead_code)] +/// A builder for creating a `CertificateMessage` by efficiently aggregating BLS signatures. +#[derive(Clone)] +pub struct VoteCertificateBuilder { + certificate: Certificate, + signature: SignatureProjective, + // For some certificates we need two bitmaps, for example, NotarizeFallback + // certificates have Notarize and NotarizeFallback votes, so we need two bitmaps + // to represent them. The order of the VoteType is defined in certificate_limits_and_vote_types. + // We normally put fallback votes in the second bitmap. 
+ // The order of the VoteType is important, if you change it, you might interpret + // the bitmap incorrectly. + // Some certificates (like Finalize) only need one bitmap, then the second bitmap + // will be empty. + input_bitmap_1: BitVec, + input_bitmap_2: BitVec, +} + +impl TryFrom for VoteCertificateBuilder { + type Error = CertificateError; + + fn try_from(message: CertificateMessage) -> Result { + let projective_signature = SignatureProjective::try_from(message.signature)?; + let decoded_bitmap = + decode(&message.bitmap, MAXIMUM_VALIDATORS).map_err(CertificateError::DecodeError)?; + let (mut input_bitmap_1, mut input_bitmap_2) = match decoded_bitmap { + Decoded::Base2(bitmap) => ( + bitmap, + BitVec::::repeat(false, MAXIMUM_VALIDATORS), + ), + Decoded::Base3(bitmap1, bitmap2) => (bitmap1, bitmap2), + }; + input_bitmap_1.resize(MAXIMUM_VALIDATORS, false); + input_bitmap_2.resize(MAXIMUM_VALIDATORS, false); + Ok(VoteCertificateBuilder { + certificate: message.certificate, + signature: projective_signature, + input_bitmap_1, + input_bitmap_2, + }) + } +} + +#[allow(dead_code)] +impl VoteCertificateBuilder { + pub fn new(certificate_id: Certificate) -> Self { + Self { + certificate: certificate_id, + signature: SignatureProjective::identity(), + input_bitmap_1: BitVec::repeat(false, MAXIMUM_VALIDATORS), + input_bitmap_2: BitVec::repeat(false, MAXIMUM_VALIDATORS), + } + } + + /// Aggregates a slice of `VoteMessage`s into the builder. 
+ pub fn aggregate(&mut self, messages: &[VoteMessage]) -> Result<(), CertificateError> { + if messages.is_empty() { + return Ok(()); + } + + let vote_types = certificate_limits_and_vote_types(self.certificate).1; + for vote_message in messages { + let rank = vote_message.rank as usize; + if MAXIMUM_VALIDATORS <= rank { + return Err(CertificateError::ValidatorDoesNotExist(vote_message.rank)); + } + + let current_vote_type = VoteType::get_type(&vote_message.vote); + + if current_vote_type == vote_types[0] { + self.input_bitmap_1.set(rank, true); + } else if vote_types.len() == 2 && current_vote_type == vote_types[1] { + self.input_bitmap_2.set(rank, true); + } + } + + let signature_iter = messages + .iter() + .map(|vote_message| &vote_message.signature) + .collect_vec(); + Ok(self.signature.aggregate_with(&signature_iter)?) + } + + pub fn build(self) -> Result { + let mut input_bitmap_1 = self.input_bitmap_1; + let mut input_bitmap_2 = self.input_bitmap_2; + + let last_one_1 = input_bitmap_1 // use local variable + .last_one() + .map_or(0, |i| i.saturating_add(1)); + let last_one_2 = input_bitmap_2 // use local variable + .last_one() + .map_or(0, |i| i.saturating_add(1)); + let new_length = last_one_1.max(last_one_2); + if new_length > MAXIMUM_VALIDATORS { + error!( + "Bitmap length exceeds maximum allowed: {MAXIMUM_VALIDATORS} should be caught \ + during aggregation" + ); + return Err(CertificateError::ValidatorDoesNotExist(new_length as u16)); + } + + input_bitmap_1.resize(new_length, false); + input_bitmap_2.resize(new_length, false); + let bitmap = if input_bitmap_2.count_ones() > 0 { + // If we have two bitmaps, use Base3 encoding + encode_base3(&input_bitmap_1, &input_bitmap_2).map_err(CertificateError::EncodeError)? + } else { + // If we only have one bitmap, use Base2 encoding + encode_base2(&input_bitmap_1).map_err(CertificateError::EncodeError)? 
+ }; + Ok(CertificateMessage { + certificate: self.certificate, + signature: self.signature.into(), + bitmap, + }) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_bls_signatures::{ + Keypair as BLSKeypair, PubkeyProjective as BLSPubkeyProjective, + Signature as BLSSignature, SignatureProjective, VerifiablePubkey, + }, + solana_hash::Hash, + solana_votor_messages::{ + consensus_message::{Certificate, CertificateType, VoteMessage}, + vote::Vote, + }, + }; + + #[test] + fn test_normal_build() { + let hash = Hash::new_unique(); + let certificate = Certificate::new(CertificateType::NotarizeFallback, 1, Some(hash)); + let mut builder = VoteCertificateBuilder::new(certificate); + // Test building the certificate from Notarize and NotarizeFallback votes + // Create Notarize on validator 1, 4, 6 + let vote = Vote::new_notarization_vote(1, hash); + let rank_1 = [1, 4, 6]; + let messages_1 = rank_1 + .iter() + .map(|&rank| { + let keypair = BLSKeypair::new(); + let signature = keypair.sign(b"fake_vote_message"); + VoteMessage { + vote, + signature: signature.into(), + rank, + } + }) + .collect::>(); + builder + .aggregate(&messages_1) + .expect("Failed to aggregate notarization votes"); + // Create NotarizeFallback on validator 2, 3, 5, 7 + let vote = Vote::new_notarization_fallback_vote(1, hash); + let rank_2 = [2, 3, 5, 7]; + let messages_2 = rank_2 + .iter() + .map(|&rank| { + let keypair = BLSKeypair::new(); + let signature = keypair.sign(b"fake_vote_message_2"); + VoteMessage { + vote, + signature: signature.into(), + rank, + } + }) + .collect::>(); + builder + .aggregate(&messages_2) + .expect("Failed to aggregate notarization fallback votes"); + + let certificate_message = builder.build().expect("Failed to build certificate"); + assert_eq!(certificate_message.certificate, certificate); + match decode(&certificate_message.bitmap, MAXIMUM_VALIDATORS) + .expect("Failed to decode bitmap") + { + Decoded::Base3(bitmap1, bitmap2) => { + 
assert_eq!(bitmap1.len(), 8); + assert_eq!(bitmap2.len(), 8); + for i in rank_1 { + assert!(bitmap1[i as usize]); + } + assert_eq!(bitmap1.count_ones(), 3); + for i in rank_2 { + assert!(bitmap2[i as usize]); + } + assert_eq!(bitmap2.count_ones(), 4); + } + _ => panic!("Expected Base3 encoding"), + } + + // Build a new certificate with only Notarize votes, we should get Base2 encoding + let mut builder = VoteCertificateBuilder::new(certificate); + builder + .aggregate(&messages_1) + .expect("Failed to aggregate notarization votes"); + let certificate_message = builder.build().expect("Failed to build certificate"); + assert_eq!(certificate_message.certificate, certificate); + match decode(&certificate_message.bitmap, MAXIMUM_VALIDATORS) + .expect("Failed to decode bitmap") + { + Decoded::Base2(bitmap1) => { + assert_eq!(bitmap1.len(), 7); + for i in rank_1 { + assert!(bitmap1[i as usize]); + } + assert_eq!(bitmap1.count_ones(), 3); + } + _ => panic!("Expected Base2 encoding"), + } + + // Base2 encoding only applies when the first bitmap is non-empty, if we build another + // certificate with only NotarizeFallback votes, we should still get Base3 encoding + let mut builder = VoteCertificateBuilder::new(certificate); + builder + .aggregate(&messages_2) + .expect("Failed to aggregate notarization fallback votes"); + let certificate_message = builder.build().expect("Failed to build certificate"); + assert_eq!(certificate_message.certificate, certificate); + match decode(&certificate_message.bitmap, MAXIMUM_VALIDATORS) + .expect("Failed to decode bitmap") + { + Decoded::Base3(bitmap1, bitmap2) => { + assert_eq!(bitmap1.count_ones(), 0); + assert_eq!(bitmap2.len(), 8); + for i in rank_2 { + assert!(bitmap2[i as usize]); + } + assert_eq!(bitmap2.count_ones(), 4); + } + _ => panic!("Expected Base3 encoding"), + } + } + + #[test] + fn test_builder_with_errors() { + let hash = Hash::new_unique(); + let certificate = Certificate::new(CertificateType::NotarizeFallback, 1, 
Some(hash)); + let mut builder = VoteCertificateBuilder::new(certificate); + + // Test with a rank that exceeds the maximum allowed + let vote = Vote::new_notarization_vote(1, hash); + let vote2 = Vote::new_notarization_fallback_vote(1, hash); + let rank_out_of_bounds = MAXIMUM_VALIDATORS.saturating_add(1); // Exceeds MAXIMUM_VALIDATORS + let keypair = BLSKeypair::new(); + let signature = keypair.sign(b"fake_vote_message"); + let message_out_of_bounds = VoteMessage { + vote, + signature: signature.into(), + rank: rank_out_of_bounds as u16, + }; + assert_eq!( + builder.aggregate(&[message_out_of_bounds]), + Err(CertificateError::ValidatorDoesNotExist( + rank_out_of_bounds as u16 + )) + ); + + // Test bls error + let message_with_invalid_signature = VoteMessage { + vote, + signature: BLSSignature::default(), // Invalid signature + rank: 1, + }; + assert_eq!( + builder.aggregate(&[message_with_invalid_signature]), + Err(CertificateError::BlsError(BlsError::PointConversion)) + ); + + // Test encoding error + // Create two bitmaps with the same rank set + let signature = keypair.sign(b"fake_vote_message_2"); + let messages_1 = vec![VoteMessage { + vote, + signature: signature.into(), + rank: 1, + }]; + let mut builder = VoteCertificateBuilder::new(certificate); + builder + .aggregate(&messages_1) + .expect("Failed to aggregate notarization votes"); + let messages_2 = vec![VoteMessage { + vote: vote2, + signature: signature.into(), + rank: 1, // Same rank as in messages_1 + }]; + builder + .aggregate(&messages_2) + .expect("Failed to aggregate notarization fallback votes"); + assert_eq!( + builder.build(), + Err(CertificateError::EncodeError( + EncodeError::InvalidBitCombination + )) + ); + + // Test decoding error + let corrupt_certificate_message = CertificateMessage { + certificate: Certificate::new(CertificateType::NotarizeFallback, 1, Some(hash)), + signature: signature.into(), + bitmap: vec![0xFF; 100], // Corrupted bitmap + }; + assert_eq!( + 
VoteCertificateBuilder::try_from(corrupt_certificate_message).err(), + Some(CertificateError::DecodeError( + DecodeError::UnsupportedEncoding + )) + ); + } + + #[test] + fn test_certificate_verification_base2_encoding() { + let slot = 10; + let hash = Hash::new_unique(); + let certificate_id = Certificate::new(CertificateType::Notarize, slot, Some(hash)); + + // 1. Setup: Create keypairs and a single vote object. + // All validators will sign the same message, resulting in a single bitmap. + let num_validators = 5; + let mut keypairs = Vec::new(); + let mut vote_messages = Vec::new(); + let vote = Vote::new_notarization_vote(slot, hash); + let serialized_vote = bincode::serialize(&vote).unwrap(); + + for i in 0..num_validators { + let keypair = BLSKeypair::new(); + let signature = keypair.sign(&serialized_vote); + vote_messages.push(VoteMessage { + vote, + signature: signature.into(), + rank: i as u16, + }); + keypairs.push(keypair); + } + + // 2. Generation: Aggregate votes and build the certificate. This will + // use base2 encoding because it only contains one type of vote. + let mut builder = VoteCertificateBuilder::new(certificate_id); + builder + .aggregate(&vote_messages) + .expect("Failed to aggregate votes"); + let certificate_message = builder.build().expect("Failed to build certificate"); + + // 3. Verification: Aggregate the public keys and verify the signature. 
+ let pubkey_refs: Vec<_> = keypairs.iter().map(|kp| &kp.public).collect(); + let aggregate_pubkey = + BLSPubkeyProjective::aggregate(&pubkey_refs).expect("Failed to aggregate public keys"); + + let verification_result = + aggregate_pubkey.verify_signature(&certificate_message.signature, &serialized_vote); + + assert!( + verification_result.unwrap_or(false), + "BLS aggregate signature verification failed for base2 encoded certificate" + ); + } + + #[test] + fn test_certificate_verification_base3_encoding() { + let slot = 20; + let hash = Hash::new_unique(); + // A NotarizeFallback certificate can be composed of both Notarize and NotarizeFallback + // votes. + let certificate_id = Certificate::new(CertificateType::NotarizeFallback, slot, Some(hash)); + + // 1. Setup: Create two groups of validators signing two different vote types. + let mut all_vote_messages = Vec::new(); + let mut all_pubkeys = Vec::new(); + let mut all_messages = Vec::new(); + + // Group 1: Signs a Notarize vote. + let notarize_vote = Vote::new_notarization_vote(slot, hash); + let serialized_notarize_vote = bincode::serialize(¬arize_vote).unwrap(); + for i in 0..3 { + let keypair = BLSKeypair::new(); + let signature = keypair.sign(&serialized_notarize_vote); + all_vote_messages.push(VoteMessage { + vote: notarize_vote, + signature: signature.into(), + rank: i as u16, // Ranks 0, 1, 2 + }); + all_pubkeys.push(keypair.public); + all_messages.push(serialized_notarize_vote.clone()); + } + + // Group 2: Signs a NotarizeFallback vote. 
+ let notarize_fallback_vote = Vote::new_notarization_fallback_vote(slot, hash); + let serialized_fallback_vote = bincode::serialize(¬arize_fallback_vote).unwrap(); + for i in 3..6 { + let keypair = BLSKeypair::new(); + let signature = keypair.sign(&serialized_fallback_vote); + all_vote_messages.push(VoteMessage { + vote: notarize_fallback_vote, + signature: signature.into(), + rank: i as u16, // Ranks 3, 4, 5 + }); + all_pubkeys.push(keypair.public); + all_messages.push(serialized_fallback_vote.clone()); + } + + // 2. Generation: Aggregate votes. Because there are two vote types, this will use + // base3 encoding. + let mut builder = VoteCertificateBuilder::new(certificate_id); + builder + .aggregate(&all_vote_messages) + .expect("Failed to aggregate votes"); + let certificate_message = builder.build().expect("Failed to build certificate"); + + // 3. Verification: + let decoded_bitmap = + decode(&certificate_message.bitmap, MAXIMUM_VALIDATORS).expect("Failed to decode"); + + match decoded_bitmap { + Decoded::Base2(_bitmap) => { + panic!("Expected Base3 encoding, but got Base2 encoding"); + } + Decoded::Base3(bitmap1, bitmap2) => { + // Bitmap1 should correspond to the Notarize votes (ranks 0, 1, 2) + assert_eq!(bitmap1.count_ones(), 3); + assert!(bitmap1[0] && bitmap1[1] && bitmap1[2]); + // Bitmap2 should correspond to the NotarizeFallback votes (ranks 3, 4, 5) + assert_eq!(bitmap2.count_ones(), 3); + assert!(bitmap2[3] && bitmap2[4] && bitmap2[5]); + } + } + + let pubkey_refs: Vec<_> = all_pubkeys.iter().collect(); + let message_refs: Vec<&[u8]> = all_messages.iter().map(|m| m.as_slice()).collect(); + + SignatureProjective::verify_distinct_aggregated( + &pubkey_refs, + &certificate_message.signature, + &message_refs, + ) + .unwrap(); + } +} diff --git a/votor/src/consensus_pool/vote_pool.rs b/votor/src/consensus_pool/vote_pool.rs new file mode 100644 index 00000000000000..9d4a42a068eda3 --- /dev/null +++ b/votor/src/consensus_pool/vote_pool.rs @@ -0,0 +1,311 @@ 
+use { + crate::{common::Stake, consensus_pool::vote_certificate_builder::VoteCertificateBuilder}, + solana_hash::Hash, + solana_pubkey::Pubkey, + solana_votor_messages::consensus_message::VoteMessage, + std::collections::{HashMap, HashSet}, +}; + +#[allow(dead_code)] +#[derive(Debug)] +pub(crate) struct VoteEntry { + pub(crate) transactions: Vec, + pub(crate) total_stake_by_key: Stake, +} + +#[allow(dead_code)] +impl VoteEntry { + pub fn new() -> Self { + Self { + transactions: Vec::new(), + total_stake_by_key: 0, + } + } +} + +#[allow(dead_code)] +pub(crate) trait VotePool { + fn total_stake(&self) -> Stake; + fn has_prev_validator_vote(&self, validator_vote_key: &Pubkey) -> bool; +} + +#[allow(dead_code)] +/// There are two types of vote pools: +/// - SimpleVotePool: Tracks all votes of a specific vote type made by validators for some slot N, but only one vote per block. +/// - DuplicateBlockVotePool: Tracks all votes of a specific vote type made by validators for some slot N, +/// but allows votes for different blocks by the same validator. Only relevant for VotePool's that are of type +/// Notarization or NotarizationFallback +pub(crate) enum VotePoolType { + SimpleVotePool(SimpleVotePool), + DuplicateBlockVotePool(DuplicateBlockVotePool), +} + +pub(crate) struct SimpleVotePool { + /// Tracks all votes of a specific vote type made by validators for some slot N. 
+ pub(crate) vote_entry: VoteEntry, + prev_voted_validators: HashSet, +} + +#[allow(dead_code)] +impl SimpleVotePool { + pub fn new() -> Self { + Self { + vote_entry: VoteEntry::new(), + prev_voted_validators: HashSet::new(), + } + } + + pub fn add_vote( + &mut self, + validator_vote_key: &Pubkey, + validator_stake: Stake, + transaction: &VoteMessage, + ) -> Option { + if self.prev_voted_validators.contains(validator_vote_key) { + return None; + } + self.prev_voted_validators.insert(*validator_vote_key); + self.vote_entry.transactions.push(*transaction); + self.vote_entry.total_stake_by_key = self + .vote_entry + .total_stake_by_key + .saturating_add(validator_stake); + Some(self.vote_entry.total_stake_by_key) + } + + pub fn add_to_certificate(&self, output: &mut VoteCertificateBuilder) { + output + .aggregate(&self.vote_entry.transactions) + .expect("Incoming vote message signatures are assumed to be valid") + } +} + +impl VotePool for SimpleVotePool { + fn total_stake(&self) -> Stake { + self.vote_entry.total_stake_by_key + } + fn has_prev_validator_vote(&self, validator_vote_key: &Pubkey) -> bool { + self.prev_voted_validators.contains(validator_vote_key) + } +} + +pub(crate) struct DuplicateBlockVotePool { + max_entries_per_pubkey: usize, + pub(crate) votes: HashMap, + total_stake: Stake, + prev_voted_block_ids: HashMap>, +} + +#[allow(dead_code)] +impl DuplicateBlockVotePool { + pub fn new(max_entries_per_pubkey: usize) -> Self { + Self { + max_entries_per_pubkey, + votes: HashMap::new(), + total_stake: 0, + prev_voted_block_ids: HashMap::new(), + } + } + + pub fn add_vote( + &mut self, + validator_vote_key: &Pubkey, + voted_block_id: Hash, + transaction: &VoteMessage, + validator_stake: Stake, + ) -> Option { + // Check whether the validator_vote_key already used the same voted_block_id or exceeded max_entries_per_pubkey + // If so, return false, otherwise add the voted_block_id to the prev_votes + let prev_voted_block_ids = self + .prev_voted_block_ids + 
.entry(*validator_vote_key) + .or_default(); + if prev_voted_block_ids.contains(&voted_block_id) { + return None; + } + let inserted_first_time = prev_voted_block_ids.is_empty(); + if prev_voted_block_ids.len() >= self.max_entries_per_pubkey { + return None; + } + prev_voted_block_ids.push(voted_block_id); + + let vote_entry = self + .votes + .entry(voted_block_id) + .or_insert_with(VoteEntry::new); + vote_entry.transactions.push(*transaction); + vote_entry.total_stake_by_key = vote_entry + .total_stake_by_key + .saturating_add(validator_stake); + + if inserted_first_time { + self.total_stake = self.total_stake.saturating_add(validator_stake); + } + Some(vote_entry.total_stake_by_key) + } + + pub fn total_stake_by_block_id(&self, block_id: &Hash) -> Stake { + self.votes + .get(block_id) + .map_or(0, |vote_entries| vote_entries.total_stake_by_key) + } + + pub fn add_to_certificate(&self, block_id: &Hash, output: &mut VoteCertificateBuilder) { + if let Some(vote_entries) = self.votes.get(block_id) { + output + .aggregate(&vote_entries.transactions) + .expect("Incoming vote message signatures are assumed to be valid") + } + } + + pub fn has_prev_validator_vote_for_block( + &self, + validator_vote_key: &Pubkey, + block_id: &Hash, + ) -> bool { + self.prev_voted_block_ids + .get(validator_vote_key) + .is_some_and(|vs| vs.contains(block_id)) + } +} + +impl VotePool for DuplicateBlockVotePool { + fn total_stake(&self) -> Stake { + self.total_stake + } + fn has_prev_validator_vote(&self, validator_vote_key: &Pubkey) -> bool { + self.prev_voted_block_ids.contains_key(validator_vote_key) + } +} + +#[cfg(test)] +mod test { + use { + super::*, + solana_bls_signatures::Signature as BLSSignature, + solana_votor_messages::{consensus_message::VoteMessage, vote::Vote}, + }; + + #[test] + fn test_skip_vote_pool() { + let mut vote_pool = SimpleVotePool::new(); + let vote = Vote::new_skip_vote(5); + let transaction = VoteMessage { + vote, + signature: BLSSignature::default(), + rank: 
1, + }; + let my_pubkey = Pubkey::new_unique(); + + assert_eq!(vote_pool.add_vote(&my_pubkey, 10, &transaction), Some(10)); + assert_eq!(vote_pool.total_stake(), 10); + + // Adding the same key again should fail + assert_eq!(vote_pool.add_vote(&my_pubkey, 10, &transaction), None); + assert_eq!(vote_pool.total_stake(), 10); + + // Adding a different key should succeed + let new_pubkey = Pubkey::new_unique(); + assert_eq!(vote_pool.add_vote(&new_pubkey, 60, &transaction), Some(70)); + assert_eq!(vote_pool.total_stake(), 70); + } + + #[test] + fn test_notarization_pool() { + let mut vote_pool = DuplicateBlockVotePool::new(1); + let my_pubkey = Pubkey::new_unique(); + let block_id = Hash::new_unique(); + let vote = Vote::new_notarization_vote(3, block_id); + let transaction = VoteMessage { + vote, + signature: BLSSignature::default(), + rank: 1, + }; + assert_eq!( + vote_pool.add_vote(&my_pubkey, block_id, &transaction, 10), + Some(10) + ); + assert_eq!(vote_pool.total_stake(), 10); + assert_eq!(vote_pool.total_stake_by_block_id(&block_id), 10); + + // Adding the same key again should fail + assert_eq!( + vote_pool.add_vote(&my_pubkey, block_id, &transaction, 10), + None + ); + assert_eq!(vote_pool.total_stake(), 10); + + // Adding a different bankhash should fail + assert_eq!( + vote_pool.add_vote(&my_pubkey, block_id, &transaction, 10), + None + ); + assert_eq!(vote_pool.total_stake(), 10); + + // Adding a different key should succeed + let new_pubkey = Pubkey::new_unique(); + assert_eq!( + vote_pool.add_vote(&new_pubkey, block_id, &transaction, 60), + Some(70) + ); + assert_eq!(vote_pool.total_stake(), 70); + assert_eq!(vote_pool.total_stake_by_block_id(&block_id), 70); + } + + #[test] + fn test_notarization_fallback_pool() { + solana_logger::setup(); + let mut vote_pool = DuplicateBlockVotePool::new(3); + let vote = Vote::new_notarization_fallback_vote(7, Hash::new_unique()); + let transaction = VoteMessage { + vote, + signature: BLSSignature::default(), + rank: 1, 
+ }; + let my_pubkey = Pubkey::new_unique(); + + let block_ids: Vec = (0..4).map(|_| Hash::new_unique()).collect(); + + // Adding the first 3 votes should succeed, but total_stake should remain at 10 + for block_id in &block_ids[0..3] { + assert_eq!( + vote_pool.add_vote(&my_pubkey, *block_id, &transaction, 10), + Some(10) + ); + assert_eq!(vote_pool.total_stake(), 10); + assert_eq!(vote_pool.total_stake_by_block_id(block_id), 10); + } + // Adding the 4th vote should fail + assert_eq!( + vote_pool.add_vote(&my_pubkey, block_ids[3], &transaction, 10), + None + ); + assert_eq!(vote_pool.total_stake(), 10); + assert_eq!(vote_pool.total_stake_by_block_id(&block_ids[3]), 0); + + // Adding a different key should succeed + let new_pubkey = Pubkey::new_unique(); + for block_id in &block_ids[1..3] { + assert_eq!( + vote_pool.add_vote(&new_pubkey, *block_id, &transaction, 60), + Some(70) + ); + assert_eq!(vote_pool.total_stake(), 70); + assert_eq!(vote_pool.total_stake_by_block_id(block_id), 70); + } + + // The new key only added 2 votes, so adding block_ids[3] should succeed + assert_eq!( + vote_pool.add_vote(&new_pubkey, block_ids[3], &transaction, 60), + Some(60) + ); + assert_eq!(vote_pool.total_stake(), 70); + assert_eq!(vote_pool.total_stake_by_block_id(&block_ids[3]), 60); + + // Now if adding the same key again, it should fail + assert_eq!( + vote_pool.add_vote(&new_pubkey, block_ids[0], &transaction, 60), + None + ); + } +} diff --git a/votor/src/event.rs b/votor/src/event.rs new file mode 100644 index 00000000000000..30d014e55f72b6 --- /dev/null +++ b/votor/src/event.rs @@ -0,0 +1,96 @@ +use { + crossbeam_channel::{Receiver, Sender}, + solana_clock::Slot, + solana_runtime::bank::Bank, + solana_votor_messages::consensus_message::Block, + std::{sync::Arc, time::Instant}, +}; + +#[derive(Debug, Clone)] +pub struct CompletedBlock { + pub slot: Slot, + // TODO: once we have the async execution changes this can be (block_id, parent_block_id) instead + pub bank: Arc, +} + 
+/// Context for the block creation loop to start a leader window +#[derive(Copy, Clone, Debug)] +pub struct LeaderWindowInfo { + pub start_slot: Slot, + pub end_slot: Slot, + pub parent_block: Block, + pub skip_timer: Instant, +} + +pub type VotorEventSender = Sender; +pub type VotorEventReceiver = Receiver; + +/// Events that trigger actions in Votor +/// TODO: remove bank hash once we update votes +#[derive(Debug, Clone)] +pub enum VotorEvent { + /// A block has completed replay and is ready for voting + Block(CompletedBlock), + + /// The block has received a notarization certificate + BlockNotarized(Block), + + /// Received the first shred for the slot. + FirstShred(Slot), + + /// The pool has marked the given block as a ready parent for `slot` + ParentReady { slot: Slot, parent_block: Block }, + + /// Timeout to detect early that an honest leader has crashed and + /// whether the leader window should be skipped. + TimeoutCrashedLeader(Slot), + + /// Timeout to inspect whether the remaining leader window should be skipped. 
+ Timeout(Slot), + + /// The given block has reached the safe to notar status + SafeToNotar(Block), + + /// The given slot has reached the safe to skip status + SafeToSkip(Slot), + + /// We are the leader for this window and have reached the parent ready status + /// Produce the window + ProduceWindow(LeaderWindowInfo), + + /// The block has received a slow or fast finalization certificate and is eligble for rooting + /// The second bool indicates whether the block is a fast finalization + Finalized(Block, bool), + + /// We have not observed a finalization and reached the standstill timeout + /// The slot is the highest finalized slot + Standstill(Slot), + + /// The identity keypair has changed due to an operator calling set-identity + SetIdentity, +} + +impl VotorEvent { + /// Ignore old events + #[allow(dead_code)] + // TODO(wen): remove allow(dead_code) when event_handler is fully integrated + pub(crate) fn should_ignore(&self, root: Slot) -> bool { + match self { + VotorEvent::Block(completed_block) => completed_block.slot <= root, + VotorEvent::Timeout(s) + | VotorEvent::SafeToSkip(s) + | VotorEvent::TimeoutCrashedLeader(s) + | VotorEvent::FirstShred(s) + | VotorEvent::SafeToNotar((s, _)) + | VotorEvent::Finalized((s, _), _) + | VotorEvent::BlockNotarized((s, _)) + | VotorEvent::ParentReady { + slot: s, + parent_block: _, + } => s <= &root, + VotorEvent::ProduceWindow(_) => false, + VotorEvent::Standstill(_) => false, + VotorEvent::SetIdentity => false, + } + } +} diff --git a/votor/src/lib.rs b/votor/src/lib.rs new file mode 100644 index 00000000000000..994c6947fedb91 --- /dev/null +++ b/votor/src/lib.rs @@ -0,0 +1,29 @@ +#![cfg_attr(feature = "frozen-abi", feature(min_specialization))] + +#[cfg(feature = "agave-unstable-api")] +pub mod common; + +#[cfg(feature = "agave-unstable-api")] +pub mod consensus_pool; + +#[cfg(feature = "agave-unstable-api")] +pub mod event; + +#[cfg(feature = "agave-unstable-api")] +pub mod root_utils; + +#[cfg(feature = 
"agave-unstable-api")] +#[macro_use] +extern crate log; + +#[cfg(feature = "agave-unstable-api")] +extern crate serde_derive; + +#[cfg(feature = "agave-unstable-api")] +pub mod vote_history; +#[cfg(feature = "agave-unstable-api")] +pub mod vote_history_storage; + +#[cfg_attr(feature = "frozen-abi", macro_use)] +#[cfg(feature = "frozen-abi")] +extern crate solana_frozen_abi_macro; diff --git a/votor/src/root_utils.rs b/votor/src/root_utils.rs new file mode 100644 index 00000000000000..e26f2e0792ddd8 --- /dev/null +++ b/votor/src/root_utils.rs @@ -0,0 +1,141 @@ +use { + crossbeam_channel::Sender, + log::{info, warn}, + solana_clock::Slot, + solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache}, + solana_pubkey::Pubkey, + solana_rpc::{ + optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSenderConfig}, + rpc_subscriptions::RpcSubscriptions, + }, + solana_runtime::{ + bank_forks::{BankForks, SetRootError}, + installed_scheduler_pool::BankWithScheduler, + snapshot_controller::SnapshotController, + }, + std::sync::{Arc, RwLock}, +}; + +/// Sets the new root, additionally performs the callback after setting the bank forks root +/// During this transition period where both replay stage and votor can root depending on the feature flag we +/// have a callback that cleans up progress map and other tower bft structures. Then the callgraph is +/// +/// ReplayStage::check_and_handle_new_root -> root_utils::check_and_handle_new_root(callback) +/// | +/// v +/// ReplayStage::handle_new_root -> root_utils::set_bank_forks_root(callback) -> callback() +/// +/// Votor does not need the progress map or other tower bft structures, so it will not use the callback. 
+#[allow(clippy::too_many_arguments)] +pub fn check_and_handle_new_root( + parent_slot: Slot, + new_root: Slot, + snapshot_controller: Option<&SnapshotController>, + highest_super_majority_root: Option, + bank_notification_sender: &Option, + drop_bank_sender: &Sender>, + blockstore: &Blockstore, + leader_schedule_cache: &Arc, + bank_forks: &RwLock, + rpc_subscriptions: Option<&RpcSubscriptions>, + my_pubkey: &Pubkey, + callback: CB, +) -> Result<(), SetRootError> +where + CB: FnOnce(&BankForks), +{ + // get the root bank before squash + let root_bank = bank_forks + .read() + .unwrap() + .get(new_root) + .expect("Root bank doesn't exist"); + let mut rooted_banks = root_bank.parents(); + let oldest_parent = rooted_banks.last().map(|last| last.parent_slot()); + rooted_banks.push(root_bank.clone()); + let rooted_slots: Vec<_> = rooted_banks.iter().map(|bank| bank.slot()).collect(); + // The following differs from rooted_slots by including the parent slot of the oldest parent bank. + let rooted_slots_with_parents = bank_notification_sender + .as_ref() + .is_some_and(|sender| sender.should_send_parents) + .then(|| { + let mut new_chain = rooted_slots.clone(); + new_chain.push(oldest_parent.unwrap_or(parent_slot)); + new_chain + }); + + // Call leader schedule_cache.set_root() before blockstore.set_root() because + // bank_forks.root is consumed by repair_service to update gossip, so we don't want to + // get shreds for repair on gossip before we update leader schedule, otherwise they may + // get dropped. 
+ leader_schedule_cache.set_root(rooted_banks.last().unwrap()); + blockstore + .set_roots(rooted_slots.iter()) + .expect("Ledger set roots failed"); + set_bank_forks_root( + new_root, + bank_forks, + snapshot_controller, + highest_super_majority_root, + drop_bank_sender, + callback, + )?; + blockstore.slots_stats.mark_rooted(new_root); + if let Some(rpc_subscriptions) = rpc_subscriptions { + rpc_subscriptions.notify_roots(rooted_slots); + } + if let Some(sender) = bank_notification_sender { + let dependency_work = sender + .dependency_tracker + .as_ref() + .map(|s| s.get_current_declared_work()); + sender + .sender + .send((BankNotification::NewRootBank(root_bank), dependency_work)) + .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}")); + + if let Some(new_chain) = rooted_slots_with_parents { + let dependency_work = sender + .dependency_tracker + .as_ref() + .map(|s| s.get_current_declared_work()); + sender + .sender + .send((BankNotification::NewRootedChain(new_chain), dependency_work)) + .unwrap_or_else(|err| warn!("bank_notification_sender failed: {err:?}")); + } + } + info!("{my_pubkey}: new root {new_root}"); + Ok(()) +} + +/// Sets the bank forks root: +/// - Prune the program cache +/// - Prune bank forks and drop the removed banks +/// - Calls the callback for use in replay stage and tests +pub fn set_bank_forks_root( + new_root: Slot, + bank_forks: &RwLock, + snapshot_controller: Option<&SnapshotController>, + highest_super_majority_root: Option, + drop_bank_sender: &Sender>, + callback: CB, +) -> Result<(), SetRootError> +where + CB: FnOnce(&BankForks), +{ + bank_forks.read().unwrap().prune_program_cache(new_root); + let removed_banks = bank_forks.write().unwrap().set_root( + new_root, + snapshot_controller, + highest_super_majority_root, + )?; + + drop_bank_sender + .send(removed_banks) + .unwrap_or_else(|err| warn!("bank drop failed: {err:?}")); + + let r_bank_forks = bank_forks.read().unwrap(); + callback(&r_bank_forks); + Ok(()) 
+} diff --git a/votor/src/vote_history.rs b/votor/src/vote_history.rs new file mode 100644 index 00000000000000..e120e46b595d86 --- /dev/null +++ b/votor/src/vote_history.rs @@ -0,0 +1,553 @@ +use { + super::vote_history_storage::{ + Result, SavedVoteHistory, SavedVoteHistoryVersions, VoteHistoryStorage, + }, + serde::{Deserialize, Serialize}, + solana_clock::Slot, + solana_hash::Hash, + solana_keypair::Keypair, + solana_pubkey::Pubkey, + solana_votor_messages::{consensus_message::Block, vote::Vote}, + std::collections::{hash_map::Entry, HashMap, HashSet}, + thiserror::Error, +}; + +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[derive(PartialEq, Eq, Debug, Default, Clone, Copy, Serialize, Deserialize)] +pub(crate) enum BlockhashStatus { + /// No vote since restart + #[default] + Uninitialized, + /// Non voting validator + NonVoting, + /// Hot spare validator + HotSpare, + /// Successfully generated vote tx with blockhash + Blockhash(Slot, Hash), +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +pub enum VoteHistoryVersions { + Current(VoteHistory), +} +impl VoteHistoryVersions { + pub fn new_current(vote_history: VoteHistory) -> Self { + Self::Current(vote_history) + } + + pub fn convert_to_current(self) -> VoteHistory { + match self { + VoteHistoryVersions::Current(vote_history) => vote_history, + } + } +} + +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "H9oKKcWpebSTPtnXG6Aetwb7434CrW21pxnrrusYVEPy") +)] +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Default)] +pub struct VoteHistory { + /// The validator identity that cast votes + pub node_pubkey: Pubkey, + + /// The slots which this node has cast either a notarization or skip vote + voted: HashSet, + + /// The blocks for which this node has cast a notarization vote + /// In the format of slot, block_id, bank_hash + voted_notar: HashMap, + + /// The blocks for which this node has cast a notarization fallback + /// vote in this slot + 
voted_notar_fallback: HashMap>, + + /// The slots for which this node has cast a skip fallback vote + voted_skip_fallback: HashSet, + + /// The slots in which this node has cast at least one of: + /// - `SkipVote` + /// - `SkipFallback` + /// - `NotarizeFallback` + skipped: HashSet, + + /// The slots for which this node has cast a finalization vote. This node + /// will not cast any additional votes for these slots + its_over: HashSet, + + /// All votes cast for a `slot`, for use in refresh + votes_cast: HashMap>, + + /// Blocks which have a notarization certificate via the certificate pool + notarized_blocks: HashSet, + + /// Slots which have a parent ready condition via the certificate pool + parent_ready_slots: HashMap>, + + /// The latest root set by the voting loop. The above structures will not + /// contain votes for slots before `root` + root: Slot, +} + +impl VoteHistory { + pub fn new(node_pubkey: Pubkey, root: Slot) -> Self { + Self { + node_pubkey, + root, + ..Self::default() + } + } + + /// Have we cast a notarization or skip vote for `slot` + pub fn voted(&self, slot: Slot) -> bool { + assert!(slot >= self.root); + self.voted.contains(&slot) + } + + /// The block for which we voted notarize in slot `slot` + pub fn voted_notar(&self, slot: Slot) -> Option { + assert!(slot >= self.root); + self.voted_notar.get(&slot).copied() + } + + /// Whether we voted notarize fallback in `slot` for block `(block_id, bank_hash)` + pub fn voted_notar_fallback(&self, slot: Slot, block_id: Hash) -> bool { + assert!(slot >= self.root); + self.voted_notar_fallback + .get(&slot) + .is_some_and(|v| v.contains(&block_id)) + } + + /// Whether we voted skip fallback for `slot` + pub fn voted_skip_fallback(&self, slot: Slot) -> bool { + assert!(slot >= self.root); + self.voted_skip_fallback.contains(&slot) + } + + /// Have we cast any skip vote variation for `slot` + pub fn skipped(&self, slot: Slot) -> bool { + assert!(slot >= self.root); + self.skipped.contains(&slot) + } + + 
/// Have we cast a finalization vote for `slot`
assert!(self.voted(vote.slot())); + assert!(!self.its_over(vote.slot())); + self.skipped.insert(vote.slot()); + self.voted_skip_fallback.insert(vote.slot()); + } + } + self.votes_cast.entry(vote.slot()).or_default().push(vote); + } + + /// Add a new notarized block + pub fn add_block_notarized(&mut self, block @ (slot, _): Block) { + if slot < self.root { + return; + } + self.notarized_blocks.insert(block); + } + + /// Add a new parent ready slot + /// + /// Returns true if the insertion was successful and this was the + /// first parent ready for this slot, indicating we should set timeouts. + pub fn add_parent_ready(&mut self, slot: Slot, parent: Block) -> bool { + if slot < self.root { + return false; + } + match self.parent_ready_slots.entry(slot) { + Entry::Occupied(mut entry) => { + entry.get_mut().insert(parent); + false + } + Entry::Vacant(entry) => { + entry.insert(HashSet::from([parent])); + true + } + } + } + + pub fn highest_parent_ready_slot(&self) -> Option { + self.parent_ready_slots.keys().max().copied() + } + + /// Sets the new root slot and cleans up outdated slots < `root` + pub fn set_root(&mut self, root: Slot) { + self.root = root; + self.voted.retain(|s| *s >= root); + self.voted_notar.retain(|s, _| *s >= root); + self.voted_notar_fallback.retain(|s, _| *s >= root); + self.voted_skip_fallback.retain(|s| *s >= root); + self.skipped.retain(|s| *s >= root); + self.its_over.retain(|s| *s >= root); + self.votes_cast.retain(|s, _| *s >= root); + self.notarized_blocks.retain(|(s, _)| *s >= root); + self.parent_ready_slots.retain(|s, _| *s >= root); + } + + #[allow(dead_code)] + /// Save the vote history to `vote_history_storage` signed by `node_keypair` + pub fn save( + &self, + vote_history_storage: &dyn VoteHistoryStorage, + node_keypair: &Keypair, + ) -> Result<()> { + let saved_vote_history = SavedVoteHistory::new(self, node_keypair)?; + vote_history_storage.store(&SavedVoteHistoryVersions::from(saved_vote_history))?; + Ok(()) + } + + /// 
Restore the saved vote history from `vote_history_storage` for `node_pubkey` + pub fn restore( + vote_history_storage: &dyn VoteHistoryStorage, + node_pubkey: &Pubkey, + ) -> Result { + vote_history_storage.load(node_pubkey) + } +} + +#[derive(Error, Debug)] +pub enum VoteHistoryError { + #[error("IO Error: {0}")] + IoError(#[from] std::io::Error), + + #[error("Serialization Error: {0}")] + SerializeError(#[from] bincode::Error), + + #[error("The signature on the saved vote history is invalid")] + InvalidSignature, + + #[error("The vote history does not match this validator: {0}")] + WrongVoteHistory(String), + + #[error("The vote history is useless because of new hard fork: {0}")] + HardFork(Slot), +} + +impl VoteHistoryError { + pub fn is_file_missing(&self) -> bool { + if let VoteHistoryError::IoError(io_err) = &self { + io_err.kind() == std::io::ErrorKind::NotFound + } else { + false + } + } +} + +#[cfg(test)] +mod test { + use { + super::*, crate::vote_history_storage::FileVoteHistoryStorage, solana_signer::Signer, + solana_votor_messages::vote::Vote, + }; + + // Votes cast since is kept in HashMap, so order is not guaranteed. + // This function checks that the votes are the same, regardless of order. 
+ fn check_votes_cast_since(vote_history: &VoteHistory, slot: Slot, expected_votes: Vec) { + let votes = vote_history.votes_cast_since(slot); + assert_eq!(votes.len(), expected_votes.len()); + // This is correct because expected_votes has no duplicates + for vote in expected_votes { + assert!(votes.contains(&vote)); + } + } + + #[test] + fn test_add_votes() { + let mut vote_history = VoteHistory::new(Pubkey::new_unique(), 0); + // No votes for now + assert!(vote_history.votes_cast_since(0).is_empty()); + + // Vote Notarize on slot 1 + let block_id_1 = Hash::new_unique(); + let vote_notarize_1 = Vote::new_notarization_vote(1, block_id_1); + vote_history.add_vote(vote_notarize_1); + assert!(vote_history.voted(1)); + assert!(!vote_history.its_over(1)); + check_votes_cast_since(&vote_history, 0, vec![vote_notarize_1]); + assert_eq!(vote_history.voted_notar(1), Some(block_id_1)); + assert!(!vote_history.skipped(1)); + assert!(!vote_history.voted_notar_fallback(1, block_id_1)); + assert!(!vote_history.bad_window(1)); + + // Vote Finalize on slot 1 + let vote_finalize_1 = Vote::new_finalization_vote(1); + vote_history.add_vote(vote_finalize_1); + assert!(vote_history.voted(1)); + assert!(vote_history.its_over(1)); + check_votes_cast_since(&vote_history, 0, vec![vote_notarize_1, vote_finalize_1]); + assert!(!vote_history.bad_window(1)); + + // Vote Skip on slot 2 + let vote_skip_2 = Vote::new_skip_vote(2); + vote_history.add_vote(vote_skip_2); + assert!(vote_history.voted(2)); + assert!(vote_history.skipped(2)); + check_votes_cast_since( + &vote_history, + 0, + vec![vote_notarize_1, vote_finalize_1, vote_skip_2], + ); + assert_eq!(vote_history.voted_notar(2), None); + assert!(!vote_history.its_over(2)); + assert!(vote_history.bad_window(2)); + + // Now vote NotarizeFallback on slot 2 + let block_id_2 = Hash::new_unique(); + let vote_notarize_fallback_2 = Vote::new_notarization_fallback_vote(2, block_id_2); + vote_history.add_vote(vote_notarize_fallback_2); + 
assert!(vote_history.voted(2)); + assert!(vote_history.skipped(2)); + assert_eq!(vote_history.voted_notar(2), None); + assert!(vote_history.voted_notar_fallback(2, block_id_2)); + check_votes_cast_since( + &vote_history, + 0, + vec![ + vote_notarize_1, + vote_finalize_1, + vote_skip_2, + vote_notarize_fallback_2, + ], + ); + assert!(!vote_history.its_over(2)); + assert!(vote_history.bad_window(2)); + + // Vote Notarize on slot 3 + let block_id_3 = Hash::new_unique(); + let vote_notarize_3 = Vote::new_notarization_vote(3, block_id_3); + vote_history.add_vote(vote_notarize_3); + assert!(vote_history.voted(3)); + assert!(!vote_history.skipped(3)); + assert_eq!(vote_history.voted_notar(3), Some(block_id_3)); + assert!(!vote_history.voted_notar_fallback(3, block_id_3)); + check_votes_cast_since( + &vote_history, + 0, + vec![ + vote_notarize_1, + vote_finalize_1, + vote_skip_2, + vote_notarize_fallback_2, + vote_notarize_3, + ], + ); + assert!(!vote_history.its_over(3)); + assert!(!vote_history.bad_window(3)); + + // Now vote SkipFallback on slot 3 + let vote_skip_fallback_3 = Vote::new_skip_fallback_vote(3); + vote_history.add_vote(vote_skip_fallback_3); + assert!(vote_history.voted(3)); + assert!(vote_history.skipped(3)); + assert_eq!(vote_history.voted_notar(3), Some(block_id_3)); + assert!(!vote_history.voted_notar_fallback(3, block_id_3)); + assert!(vote_history.voted_skip_fallback(3)); + check_votes_cast_since( + &vote_history, + 0, + vec![ + vote_notarize_1, + vote_finalize_1, + vote_skip_2, + vote_notarize_fallback_2, + vote_notarize_3, + vote_skip_fallback_3, + ], + ); + assert!(!vote_history.its_over(3)); + assert!(vote_history.bad_window(3)); + + // Set root on 2 + vote_history.set_root(2); + assert_eq!(vote_history.root(), 2); + check_votes_cast_since( + &vote_history, + 0, + vec![ + vote_skip_2, + vote_notarize_fallback_2, + vote_notarize_3, + vote_skip_fallback_3, + ], + ); + // set_root doesn't automatically set its_over to true + 
assert!(!vote_history.its_over(2)); + } + + #[test] + fn test_add_notarized_blocks() { + let mut vote_history = VoteHistory::new(Pubkey::new_unique(), 0); + let block_1 = (1, Hash::new_unique()); + assert!(!vote_history.is_block_notarized(&block_1)); + vote_history.add_block_notarized(block_1); + assert!(vote_history.is_block_notarized(&block_1)); + + let block_2 = (2, Hash::new_unique()); + assert!(!vote_history.is_block_notarized(&block_2)); + vote_history.add_block_notarized(block_2); + assert!(vote_history.is_block_notarized(&block_2)); + + vote_history.set_root(2); + assert_eq!(vote_history.root(), 2); + assert!(!vote_history.is_block_notarized(&block_1)); + assert!(vote_history.is_block_notarized(&block_2)); + + // Adding a block before root silently returns + vote_history.add_block_notarized(block_1); + assert!(!vote_history.is_block_notarized(&block_1)); + } + + #[test] + fn test_add_parent_ready() { + let mut vote_history = VoteHistory::new(Pubkey::new_unique(), 0); + assert_eq!(vote_history.highest_parent_ready_slot(), None); + let block_id_0 = (0, Hash::new_unique()); + vote_history.add_parent_ready(1, block_id_0); + assert!(vote_history.is_parent_ready(1, &block_id_0)); + assert_eq!(vote_history.highest_parent_ready_slot(), Some(1)); + + vote_history.set_root(1); + assert_eq!(vote_history.root(), 1); + assert!(vote_history.is_parent_ready(1, &block_id_0)); + assert_eq!(vote_history.highest_parent_ready_slot(), Some(1)); + + // Add parent ready for slot 2 + let block_id_2_0 = (1, Hash::new_unique()); + let block_id_2_1 = (1, Hash::new_unique()); + assert!(vote_history.add_parent_ready(2, block_id_2_0)); + assert!(vote_history.is_parent_ready(2, &block_id_2_0)); + assert_eq!(vote_history.highest_parent_ready_slot(), Some(2)); + assert!(!vote_history.add_parent_ready(2, block_id_2_1)); + assert!(vote_history.is_parent_ready(2, &block_id_2_1)); + assert!(!vote_history.add_parent_ready(2, block_id_0)); + assert!(vote_history.is_parent_ready(2, &block_id_0)); 
+ + // Set root to 2 + vote_history.set_root(2); + assert_eq!(vote_history.root(), 2); + assert!(!vote_history.is_parent_ready(1, &block_id_0)); + assert!(vote_history.is_parent_ready(2, &block_id_2_0)); + assert!(vote_history.is_parent_ready(2, &block_id_2_1)); + assert!(vote_history.is_parent_ready(2, &block_id_0)); + assert_eq!(vote_history.highest_parent_ready_slot(), Some(2)); + + // Adding a parent ready for slot before root silently returns false + assert!(!vote_history.add_parent_ready(1, block_id_0)); + } + + #[test] + fn test_save_and_restore() { + let node_keypair = Keypair::new(); + let mut vote_history = VoteHistory::new(node_keypair.pubkey(), 0); + let vote_history_storage = FileVoteHistoryStorage::new(std::env::temp_dir()); + + // Add Notarize on 1 and Skip on 2 + let vote_1 = Vote::new_notarization_vote(1, Hash::new_unique()); + let vote_2 = Vote::new_skip_vote(2); + vote_history.add_vote(vote_1); + vote_history.add_vote(vote_2); + + // Save to storage + assert!(vote_history + .save(&vote_history_storage, &node_keypair) + .is_ok()); + // Restore from storage + let restored_vote_history = + VoteHistory::restore(&vote_history_storage, &node_keypair.pubkey()) + .ok() + .unwrap(); + check_votes_cast_since(&restored_vote_history, 0, vec![vote_1, vote_2]); + assert_eq!(restored_vote_history, vote_history); + + // Save should fail if you give wrong keypair + let error = vote_history + .save(&vote_history_storage, &Keypair::new()) + .err() + .unwrap(); + assert!(matches!(error, VoteHistoryError::WrongVoteHistory(_))); + assert!(!error.is_file_missing()); + + // Restore should fail if you give wrong pubkey + let error = VoteHistory::restore(&vote_history_storage, &Pubkey::new_unique()) + .err() + .unwrap(); + assert!(matches!(error, VoteHistoryError::IoError(_))); + assert!(error.is_file_missing()); + } +} diff --git a/votor/src/vote_history_storage.rs b/votor/src/vote_history_storage.rs new file mode 100644 index 00000000000000..1f5918465549ab --- /dev/null 
+++ b/votor/src/vote_history_storage.rs @@ -0,0 +1,238 @@ +use { + super::vote_history::*, + log::trace, + serde::{Deserialize, Serialize}, + solana_pubkey::Pubkey, + solana_signature::Signature, + solana_signer::Signer, + std::{ + fs::{self, File}, + io::{self, BufReader}, + path::PathBuf, + }, +}; + +pub type Result = std::result::Result; + +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum SavedVoteHistoryVersions { + Current(SavedVoteHistory), +} + +impl SavedVoteHistoryVersions { + fn try_into_vote_history(&self, node_pubkey: &Pubkey) -> Result { + // This method assumes that `self` was just deserialized + assert_eq!(self.pubkey(), Pubkey::default()); + + let vote_history = match self { + SavedVoteHistoryVersions::Current(t) => { + if !t.signature.verify(node_pubkey.as_ref(), &t.data) { + return Err(VoteHistoryError::InvalidSignature); + } + bincode::deserialize(&t.data).map(VoteHistoryVersions::Current) + } + }; + vote_history + .map_err(|e| e.into()) + .and_then(|vote_history: VoteHistoryVersions| { + let vote_history = vote_history.convert_to_current(); + if vote_history.node_pubkey != *node_pubkey { + return Err(VoteHistoryError::WrongVoteHistory(format!( + "node_pubkey is {:?} but found vote history for {:?}", + node_pubkey, vote_history.node_pubkey + ))); + } + Ok(vote_history) + }) + } + + fn serialize_into(&self, file: &mut File) -> Result<()> { + bincode::serialize_into(file, self).map_err(|e| e.into()) + } + + fn pubkey(&self) -> Pubkey { + match self { + SavedVoteHistoryVersions::Current(t) => t.node_pubkey, + } + } +} + +impl From for SavedVoteHistoryVersions { + fn from(vote_history: SavedVoteHistory) -> SavedVoteHistoryVersions { + SavedVoteHistoryVersions::Current(vote_history) + } +} + +#[cfg_attr( + feature = "frozen-abi", + derive(AbiExample), + frozen_abi(digest = "42PkuFNWFBZ6X7QoPKtpLu6SY8bxmd6KVGJbVsNBm46m") +)] +#[derive(Default, Clone, Serialize, 
Deserialize, Debug, PartialEq, Eq)] +pub struct SavedVoteHistory { + signature: Signature, + #[serde(with = "serde_bytes")] + data: Vec, + #[serde(skip)] + node_pubkey: Pubkey, +} + +impl SavedVoteHistory { + pub fn new(vote_history: &VoteHistory, keypair: &T) -> Result { + let node_pubkey = keypair.pubkey(); + if vote_history.node_pubkey != node_pubkey { + return Err(VoteHistoryError::WrongVoteHistory(format!( + "node_pubkey is {:?} but found vote history for {:?}", + node_pubkey, vote_history.node_pubkey + ))); + } + + let data = bincode::serialize(&vote_history)?; + let signature = keypair.sign_message(&data); + Ok(Self { + signature, + data, + node_pubkey, + }) + } +} + +pub trait VoteHistoryStorage: Sync + Send { + fn load(&self, node_pubkey: &Pubkey) -> Result; + fn store(&self, saved_vote_history: &SavedVoteHistoryVersions) -> Result<()>; +} + +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct NullVoteHistoryStorage {} + +impl VoteHistoryStorage for NullVoteHistoryStorage { + fn load(&self, _node_pubkey: &Pubkey) -> Result { + Err(VoteHistoryError::IoError(io::Error::new( + io::ErrorKind::Other, + "NullVoteHistoryStorage::load() not available", + ))) + } + + fn store(&self, _saved_vote_history: &SavedVoteHistoryVersions) -> Result<()> { + Ok(()) + } +} + +#[derive(Debug, Default, Clone, PartialEq, Eq)] +pub struct FileVoteHistoryStorage { + pub vote_history_path: PathBuf, +} + +impl FileVoteHistoryStorage { + pub fn new(vote_history_path: PathBuf) -> Self { + Self { vote_history_path } + } + + pub fn filename(&self, node_pubkey: &Pubkey) -> PathBuf { + self.vote_history_path + .join(format!("vote_history-{node_pubkey}")) + .with_extension("bin") + } +} + +impl VoteHistoryStorage for FileVoteHistoryStorage { + fn load(&self, node_pubkey: &Pubkey) -> Result { + let filename = self.filename(node_pubkey); + trace!("load {}", filename.display()); + + // Ensure to create parent dir here, because restore() precedes save() always + 
fs::create_dir_all(filename.parent().unwrap())?; + + // New format + let file = File::open(&filename)?; + let mut stream = BufReader::new(file); + + bincode::deserialize_from(&mut stream) + .map_err(|e| e.into()) + .and_then(|t: SavedVoteHistoryVersions| t.try_into_vote_history(node_pubkey)) + } + + fn store(&self, saved_vote_history: &SavedVoteHistoryVersions) -> Result<()> { + let pubkey = saved_vote_history.pubkey(); + let filename = self.filename(&pubkey); + trace!("store: {}", filename.display()); + let new_filename = filename.with_extension("bin.new"); + + { + // overwrite anything if exists + let mut file = File::create(&new_filename)?; + saved_vote_history.serialize_into(&mut file)?; + // file.sync_all() hurts performance; pipeline sync-ing and submitting votes to the cluster! + } + fs::rename(&new_filename, &filename)?; + // self.path.parent().sync_all() hurts performance same as the above sync + Ok(()) + } +} + +#[cfg(test)] +mod test { + use {super::*, solana_keypair::Keypair, solana_votor_messages::vote::Vote}; + + #[test] + fn test_file_vote_history_storage() { + solana_logger::setup(); + let tmp_dir = std::env::temp_dir(); + let storage = FileVoteHistoryStorage::new(tmp_dir.clone()); + let keypair = Keypair::new(); + let pubkey = keypair.pubkey(); + assert_eq!( + storage.filename(&pubkey), + PathBuf::from(format!("{}/vote_history-{}.bin", tmp_dir.display(), pubkey)) + ); + + let mut vote_history = VoteHistory::new(pubkey, 0); + let saved_vote_history = SavedVoteHistory::new(&vote_history, &keypair).unwrap(); + let saved_vote_history_versions = SavedVoteHistoryVersions::from(saved_vote_history); + assert!(storage.store(&saved_vote_history_versions).is_ok()); + let restored_vote_history = storage.load(&pubkey).unwrap(); + assert_eq!(restored_vote_history.root(), 0); + + // Overwrite and check we get the new one + vote_history.set_root(1); + vote_history.add_vote(Vote::new_skip_vote(2)); + let saved_vote_history = SavedVoteHistory::new(&vote_history, 
&keypair).unwrap(); + let saved_vote_history_versions = SavedVoteHistoryVersions::from(saved_vote_history); + assert!(storage.store(&saved_vote_history_versions).is_ok()); + let restored_vote_history = storage.load(&pubkey).unwrap(); + assert_eq!(restored_vote_history.root(), 1); + assert_eq!( + restored_vote_history.votes_cast_since(0), + vote_history.votes_cast_since(0) + ); + + // Load with a wrong pubkey should fail + let error = storage.load(&Pubkey::new_unique()).err().unwrap(); + assert!(matches!(error, VoteHistoryError::IoError(_))); + // Move Vote history to a wrong location should fail + let original_path = storage.filename(&pubkey); + let new_pubkey = Pubkey::new_unique(); + let new_path = storage.filename(&new_pubkey); + // Copy the old file to new_path + fs::copy(&original_path, &new_path).unwrap(); + let error = storage.load(&new_pubkey).err().unwrap(); + assert!(matches!(error, VoteHistoryError::InvalidSignature)); + } + + #[test] + fn test_null_vote_history_storage() { + let storage = NullVoteHistoryStorage::default(); + let keypair = Keypair::new(); + let pubkey = keypair.pubkey(); + // NullVoteHistoryStorage::load() always fails + assert!(storage.load(&pubkey).is_err()); + + let vote_history = VoteHistory::new(pubkey, 0); + let saved_vote_history = SavedVoteHistory::new(&vote_history, &keypair).unwrap(); + let saved_vote_history_versions = SavedVoteHistoryVersions::from(saved_vote_history); + // NullVoteHistoryStorage::save() always succeeds + assert!(storage.store(&saved_vote_history_versions).is_ok()); + assert!(storage.load(&pubkey).is_err()); + } +} diff --git a/watchtower/Cargo.toml b/watchtower/Cargo.toml index 3b4518197c1de9..e0f31756328306 100644 --- a/watchtower/Cargo.toml +++ b/watchtower/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "agave-watchtower" -description = "Blockchain, Rebuilt for Scale" documentation = "https://docs.rs/agave-watchtower" version = { workspace = true } authors = { workspace = true } +description = { workspace = 
true } repository = { workspace = true } homepage = { workspace = true } license = { workspace = true } @@ -19,12 +19,12 @@ log = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } solana-cli-output = { workspace = true } -solana-hash = "=2.3.0" -solana-logger = "=2.3.1" +solana-hash = "=3.0.0" +solana-logger = "=3.0.0" solana-metrics = { workspace = true } -solana-native-token = "=2.2.2" +solana-native-token = "=3.0.0" solana-notifier = { workspace = true } -solana-pubkey = { version = "=2.4.0", default-features = false } +solana-pubkey = { version = "=3.0.0", default-features = false } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } solana-version = { workspace = true } diff --git a/watchtower/src/main.rs b/watchtower/src/main.rs index e4ae9495555d07..065748133d300a 100644 --- a/watchtower/src/main.rs +++ b/watchtower/src/main.rs @@ -12,7 +12,7 @@ use { solana_cli_output::display::format_labeled_address, solana_hash::Hash, solana_metrics::{datapoint_error, datapoint_info}, - solana_native_token::{sol_to_lamports, Sol}, + solana_native_token::{sol_str_to_lamports, Sol}, solana_notifier::{NotificationType, Notifier}, solana_pubkey::Pubkey, solana_rpc_client::rpc_client::RpcClient, @@ -44,7 +44,8 @@ fn get_config() -> Config { let matches = App::new(crate_name!()) .about(crate_description!()) .version(solana_version::version!()) - .after_help("ADDITIONAL HELP: + .after_help( + "ADDITIONAL HELP: To receive a Slack, Discord, PagerDuty and/or Telegram notification on sanity failure, define environment variables before running `agave-watchtower`: @@ -56,7 +57,8 @@ fn get_config() -> Config { export TELEGRAM_BOT_TOKEN=... export TELEGRAM_CHAT_ID=... 
- PagerDuty requires an Integration Key from the Events API v2 (Add this integration to your PagerDuty service to get this) + PagerDuty requires an Integration Key from the Events API v2 (Add this integration to your \ + PagerDuty service to get this) export PAGERDUTY_INTEGRATION_KEY=... @@ -64,7 +66,10 @@ fn get_config() -> Config { and a sending number owned by that account, define environment variable before running `agave-watchtower`: - export TWILIO_CONFIG='ACCOUNT=,TOKEN=,TO=,FROM='") + export \ + TWILIO_CONFIG='ACCOUNT=,TOKEN=,TO=,\ + FROM='", + ) .arg({ let arg = Arg::with_name("config_file") .short("C") @@ -96,7 +101,9 @@ fn get_config() -> Config { .multiple(true) .number_of_values(3) .conflicts_with("json_rpc_url") - .help("JSON RPC URLs for the cluster (takes exactly 3 values, conflicts with --url)"), + .help( + "JSON RPC URLs for the cluster (takes exactly 3 values, conflicts with --url)", + ), ) .arg( Arg::with_name("rpc_timeout") @@ -120,7 +127,7 @@ fn get_config() -> Config { .value_name("COUNT") .takes_value(true) .default_value("1") - .help("How many consecutive failures must occur to trigger a notification") + .help("How many consecutive failures must occur to trigger a notification"), ) .arg( Arg::with_name("validator_identities") @@ -129,7 +136,7 @@ fn get_config() -> Config { .takes_value(true) .validator(is_pubkey_or_keypair) .multiple(true) - .help("Validator identities to monitor for delinquency") + .help("Validator identities to monitor for delinquency"), ) .arg( Arg::with_name("minimum_validator_identity_balance") @@ -138,19 +145,22 @@ fn get_config() -> Config { .takes_value(true) .default_value("10") .validator(is_parsable::) - .help("Alert when the validator identity balance is less than this amount of SOL") + .help("Alert when the validator identity balance is less than this amount of SOL"), ) .arg( // Deprecated parameter, now always enabled Arg::with_name("no_duplicate_notifications") .long("no-duplicate-notifications") - 
.hidden(hidden_unless_forced()) + .hidden(hidden_unless_forced()), ) .arg( Arg::with_name("monitor_active_stake") .long("monitor-active-stake") .takes_value(false) - .help("Alert when the current stake for the cluster drops below the amount specified by --active-stake-alert-threshold"), + .help( + "Alert when the current stake for the cluster drops below the amount \ + specified by --active-stake-alert-threshold", + ), ) .arg( Arg::with_name("active_stake_alert_threshold") @@ -165,10 +175,11 @@ fn get_config() -> Config { Arg::with_name("ignore_http_bad_gateway") .long("ignore-http-bad-gateway") .takes_value(false) - .help("Ignore HTTP 502 Bad Gateway errors from the JSON RPC URL. \ - This flag can help reduce false positives, at the expense of \ - no alerting should a Bad Gateway error be a side effect of \ - the real problem") + .help( + "Ignore HTTP 502 Bad Gateway errors from the JSON RPC URL. This flag can help \ + reduce false positives, at the expense of no alerting should a Bad Gateway \ + error be a side effect of the real problem", + ), ) .arg( Arg::with_name("name_suffix") @@ -176,7 +187,7 @@ fn get_config() -> Config { .value_name("SUFFIX") .takes_value(true) .default_value("") - .help("Add this string into all notification messages after \"agave-watchtower\"") + .help("Add this string into all notification messages after \"agave-watchtower\""), ) .arg( Arg::with_name("acceptable_slot_range") @@ -185,7 +196,7 @@ fn get_config() -> Config { .takes_value(true) .default_value("50") .validator(is_parsable::) - .help("Acceptable range of slots for endpoints, checked at watchtower startup") + .help("Acceptable range of slots for endpoints, checked at watchtower startup"), ) .get_matches(); @@ -197,11 +208,10 @@ fn get_config() -> Config { let interval = Duration::from_secs(value_t_or_exit!(matches, "interval", u64)); let unhealthy_threshold = value_t_or_exit!(matches, "unhealthy_threshold", usize); - let minimum_validator_identity_balance = 
sol_to_lamports(value_t_or_exit!( - matches, - "minimum_validator_identity_balance", - f64 - )); + let minimum_validator_identity_balance = matches + .value_of("minimum_validator_identity_balance") + .and_then(sol_str_to_lamports) + .unwrap(); let json_rpc_urls = values_t!(matches, "json_rpc_urls", String).unwrap_or_else(|_| { vec![value_t!(matches, "json_rpc_url", String).unwrap_or_else(|_| config.json_rpc_url)] }); @@ -282,8 +292,8 @@ fn query_endpoint( match get_cluster_info(config, &endpoint.rpc_client) { Ok((transaction_count, recent_blockhash, vote_accounts, validator_balances)) => { - info!("Current transaction count: {}", transaction_count); - info!("Recent blockhash: {}", recent_blockhash); + info!("Current transaction count: {transaction_count}"); + info!("Recent blockhash: {recent_blockhash}"); info!("Current validator count: {}", vote_accounts.current.len()); info!( "Delinquent validator count: {}", @@ -385,12 +395,12 @@ fn query_endpoint( if let client_error::ErrorKind::Reqwest(reqwest_err) = err.kind() { if let Some(client_error::reqwest::StatusCode::BAD_GATEWAY) = reqwest_err.status() { if config.ignore_http_bad_gateway { - warn!("Error suppressed: {}", err); + warn!("Error suppressed: {err}"); return Ok(None); } } } - warn!("rpc-error: {}", err); + warn!("rpc-error: {err}"); Err(err) } } @@ -413,8 +423,8 @@ fn validate_endpoints( let slot = endpoint.rpc_client.get_slot()?; let genesis_hash = endpoint.rpc_client.get_genesis_hash()?; - info!("Genesis hash: {}", genesis_hash); - info!("Current slot: {}", slot); + info!("Genesis hash: {genesis_hash}"); + info!("Current slot: {slot}"); max_slot = max_slot.max(slot); min_slot = min_slot.min(slot); @@ -422,7 +432,7 @@ fn validate_endpoints( if let Some(common_genesis_hash) = opt_common_genesis_hash { if common_genesis_hash != genesis_hash { return Err( - "Endpoints don't aggree on genesis hash, have you mixed up clusters?".into(), + "Endpoints don't agree on genesis hash, have you mixed up 
clusters?".into(), ); } } else { @@ -458,7 +468,7 @@ fn main() -> Result<(), Box> { .collect(); if let Err(err) = validate_endpoints(&config, &endpoints) { - error!("Endpoint validation failed: {}", err); + error!("Endpoint validation failed: {err}"); std::process::exit(1); } @@ -510,9 +520,9 @@ fn main() -> Result<(), Box> { if failures.len() > 1 { failures.clear(); // Ignoring other failures when watchtower is unreliable - let watchtower_unreliable_msg = - "Watchtower is unreliable, RPC endpoints provide inconsistent information" - .into(); + let watchtower_unreliable_msg = "Watchtower is unreliable, RPC endpoints provide \ + inconsistent information" + .into(); failures.insert("watchtower-reliability", watchtower_unreliable_msg); } @@ -550,7 +560,7 @@ fn main() -> Result<(), Box> { "All clear after {}", humantime::format_duration(alarm_duration) ); - info!("{}", all_clear_msg); + info!("{all_clear_msg}"); notifier.send( &format!("agave-watchtower{}: {}", config.name_suffix, all_clear_msg), &NotificationType::Resolve { incident }, diff --git a/wen-restart/Cargo.toml b/wen-restart/Cargo.toml index fe7a678577fe6b..10862793309512 100644 --- a/wen-restart/Cargo.toml +++ b/wen-restart/Cargo.toml @@ -30,8 +30,8 @@ solana-ledger = { workspace = true } solana-pubkey = { workspace = true } solana-runtime = { workspace = true } solana-shred-version = { workspace = true } +solana-svm-timings = { workspace = true } solana-time-utils = { workspace = true } -solana-timings = { workspace = true } solana-vote = { workspace = true } solana-vote-interface = { workspace = true } solana-vote-program = { workspace = true } diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index bcda7d1257714f..d2bdc67b75811a 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -46,8 +46,8 @@ use { }, }, solana_shred_version::compute_shred_version, + solana_svm_timings::ExecuteTimings, solana_time_utils::timestamp, - 
solana_timings::ExecuteTimings, solana_vote::vote_transaction::VoteTransaction, std::{ collections::{HashMap, HashSet}, @@ -515,18 +515,6 @@ pub(crate) fn generate_snapshot( while abs_status.is_running() { std::thread::yield_now(); } - // Similar to waiting for ABS to stop, we also wait for the initial startup - // verification to complete. The startup verification runs in the background - // and verifies the snapshot's accounts are correct. We only want a - // single accounts hash calculation to run at a time, and since snapshot - // creation below will calculate the accounts hash, we wait for the startup - // verification to complete before proceeding. - new_root_bank - .rc - .accounts - .accounts_db - .verify_accounts_hash_in_bg - .join_background_thread(); let snapshot_config = snapshot_controller.snapshot_config(); let mut directory = &snapshot_config.full_snapshot_archives_dir; diff --git a/xdp/src/route.rs b/xdp/src/route.rs index 64c212d487a32c..96798511a9e579 100644 --- a/xdp/src/route.rs +++ b/xdp/src/route.rs @@ -247,6 +247,6 @@ mod tests { fn test_router() { let router = Router::new().unwrap(); let next_hop = router.route("1.1.1.1".parse().unwrap()).unwrap(); - eprintln!("{:?}", next_hop); + eprintln!("{next_hop:?}"); } } diff --git a/xdp/src/tx_loop.rs b/xdp/src/tx_loop.rs index c22ca0c7705193..ea333697c8366b 100644 --- a/xdp/src/tx_loop.rs +++ b/xdp/src/tx_loop.rs @@ -133,30 +133,6 @@ pub fn tx_loop, A: AsRef<[SocketAddr]>>( // packets. let mut batched_packets = 0; - // With some drivers, or always when we work in SKB mode, we need to explicitly kick the driver - // once we want the NIC to do something. 
- let kick = |ring: &TxRing>| { - if !ring.needs_wakeup() { - return; - } - - if let Err(e) = ring.wake() { - match e.raw_os_error() { - // these are non-fatal errors - Some(libc::EBUSY | libc::ENOBUFS | libc::EAGAIN) => {} - // this can temporarily happen with some drivers when changing - // settings (eg with ethtool) - Some(libc::ENETDOWN) => { - log::warn!("network interface is down") - } - // we should never get here, hopefully the driver recovers? - _ => { - log::error!("network interface driver error: {e:?}"); - } - } - } - }; - let mut timeouts = 0; loop { match receiver.try_recv() { @@ -193,25 +169,27 @@ pub fn tx_loop, A: AsRef<[SocketAddr]>>( for (addrs, payload) in batched_items.drain(..) { for addr in addrs.as_ref() { - // loop until we have space for the next packet - loop { - completion.sync(true); - // we haven't written any frames so we only need to sync the consumer position - ring.sync(false); - - // check if any frames were completed - while let Some(frame_offset) = completion.read() { - umem.release(frame_offset); - } - - if ring.available() > 0 && umem.available() > 0 { - // we have a frame and a slot in the ring - break; + if ring.available() == 0 || umem.available() == 0 { + // loop until we have space for the next packet + loop { + completion.sync(true); + // we haven't written any frames so we only need to sync the consumer position + ring.sync(false); + + // check if any frames were completed + while let Some(frame_offset) = completion.read() { + umem.release(frame_offset); + } + + if ring.available() > 0 && umem.available() > 0 { + // we have space for the next packet, break out of the loop + break; + } + + // queues are full, if NEEDS_WAKEUP is set kick the driver so hopefully it'll + // complete some work + kick(&ring); } - - // queues are full, if NEEDS_WAKEUP is set kick the driver so hopefully it'll - // complete some work - kick(&ring); } // at this point we're guaranteed to have a frame to write the next packet into and @@ -231,7 
+209,8 @@ pub fn tx_loop, A: AsRef<[SocketAddr]>>( // sanity check that the address is routable through our NIC if next_hop.if_index != dev.if_index() { log::warn!( - "dropping packet: turbine peer {addr} must be routed through if_index: {} our if_index: {}", + "dropping packet: turbine peer {addr} must be routed through \ + if_index: {} our if_index: {}", next_hop.if_index, dev.if_index() ); @@ -240,7 +219,11 @@ pub fn tx_loop, A: AsRef<[SocketAddr]>>( // we need the MAC address to send the packet if next_hop.mac_addr.is_none() { - log::warn!("dropping packet: turbine peer {addr} must be routed through {} which has no known MAC address", next_hop.ip_addr); + log::warn!( + "dropping packet: turbine peer {addr} must be routed through {} which \ + has no known MAC address", + next_hop.ip_addr + ); skip = true; }; @@ -325,3 +308,33 @@ pub fn tx_loop, A: AsRef<[SocketAddr]>>( kick(&ring); } } + +// With some drivers, or always when we work in SKB mode, we need to explicitly kick the driver once +// we want the NIC to do something. +#[inline(always)] +fn kick(ring: &TxRing>) { + if !ring.needs_wakeup() { + return; + } + + if let Err(e) = ring.wake() { + kick_error(e); + } +} + +#[inline(never)] +fn kick_error(e: std::io::Error) { + match e.raw_os_error() { + // these are non-fatal errors + Some(libc::EBUSY | libc::ENOBUFS | libc::EAGAIN) => {} + // this can temporarily happen with some drivers when changing + // settings (eg with ethtool) + Some(libc::ENETDOWN) => { + log::warn!("network interface is down") + } + // we should never get here, hopefully the driver recovers? 
+ _ => { + log::error!("network interface driver error: {e:?}"); + } + } +} diff --git a/zk-keygen/Cargo.toml b/zk-keygen/Cargo.toml deleted file mode 100644 index 1eb81240e44caa..00000000000000 --- a/zk-keygen/Cargo.toml +++ /dev/null @@ -1,40 +0,0 @@ -[package] -name = "solana-zk-keygen" -description = """ -Solana privacy-related key generation utility - -The tool currently supports two types of encryption keys that are used in the SPL Token-2022 program: - - ElGamal keypair that can be used for public key encryption - - AES128 key that can be used for an authenticated symmetric encryption (e.g. AES-GCM-SIV) -""" -publish = false -version = { workspace = true } -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -edition = { workspace = true } - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[[bin]] -name = "solana-zk-keygen" -path = "src/main.rs" - -[dependencies] -bs58 = { workspace = true } -clap = { version = "3.1.5", features = ["cargo", "derive"] } -dirs-next = { workspace = true } -solana-clap-v3-utils = { workspace = true, features = ["elgamal"] } -solana-remote-wallet = { workspace = true, features = ["default"] } -solana-seed-derivable = "=2.2.1" -solana-signer = "=2.2.1" -solana-version = { workspace = true } -solana-zk-token-sdk = { workspace = true } -thiserror = { workspace = true } -tiny-bip39 = { workspace = true } - -[dev-dependencies] -solana-pubkey = { workspace = true, features = ["rand"] } -tempfile = { workspace = true } diff --git a/zk-keygen/README.md b/zk-keygen/README.md new file mode 100644 index 00000000000000..354115e6fa6aad --- /dev/null +++ b/zk-keygen/README.md @@ -0,0 +1,3 @@ +# PLEASE READ: This repo no longer contains the Solana ZK-KEYGEN + +The solana-zk-keygen is currently developed at . 
diff --git a/zk-keygen/src/main.rs b/zk-keygen/src/main.rs deleted file mode 100644 index e63522038b98b8..00000000000000 --- a/zk-keygen/src/main.rs +++ /dev/null @@ -1,461 +0,0 @@ -use { - bip39::{Mnemonic, MnemonicType, Seed}, - clap::{crate_description, crate_name, Arg, ArgMatches, Command, PossibleValue}, - solana_clap_v3_utils::{ - input_parsers::{signer::SignerSourceParserBuilder, STDOUT_OUTFILE_TOKEN}, - keygen::{ - check_for_overwrite, - mnemonic::{acquire_passphrase_and_message, try_get_language, try_get_word_count}, - no_outfile_arg, KeyGenerationCommonArgs, NO_OUTFILE_ARG, - }, - keypair::{ - ae_key_from_path, ae_key_from_seed_phrase, elgamal_keypair_from_path, - elgamal_keypair_from_seed_phrase, SKIP_SEED_PHRASE_VALIDATION_ARG, - }, - DisplayError, - }, - solana_seed_derivable::SeedDerivable, - solana_signer::EncodableKey, - solana_zk_token_sdk::encryption::{auth_encryption::AeKey, elgamal::ElGamalKeypair}, - std::{error, str::FromStr}, - thiserror::Error, -}; - -fn output_encodable_key( - key: &K, - outfile: &str, - source: &str, -) -> Result<(), Box> { - if outfile == STDOUT_OUTFILE_TOKEN { - let mut stdout = std::io::stdout(); - key.write(&mut stdout)?; - } else { - key.write_to_file(outfile)?; - println!("Wrote {source} to {outfile}"); - } - Ok(()) -} - -fn app(crate_version: &str) -> Command { - Command::new(crate_name!()) - .about(crate_description!()) - .version(crate_version) - .subcommand_required(true) - .arg_required_else_help(true) - .subcommand( - Command::new("new") - .about("Generate a new encryption key/keypair file from a random seed phrase and optional BIP39 passphrase") - .disable_version_flag(true) - .arg( - Arg::new("type") - .index(1) - .takes_value(true) - .value_parser(clap::value_parser!(KeyType)) - .value_name("TYPE") - .required(true) - .help("The type of encryption key") - ) - .arg( - Arg::new("outfile") - .short('o') - .long("outfile") - .value_name("FILEPATH") - .takes_value(true) - .help("Path to generated file"), - ) - 
.arg( - Arg::new("force") - .long("force") - .help("Overwrite the output file if it exists"), - ) - .arg( - Arg::new("silent") - .long("silent") - .help("Do not display seed phrase. Useful when piping output to other programs that prompt for user input, like gpg"), - ) - .key_generation_common_args() - .arg(no_outfile_arg().conflicts_with_all(&["outfile", "silent"])) - ) - .subcommand( - Command::new("pubkey") - .about("Display the pubkey from a keypair file") - .disable_version_flag(true) - .arg( - Arg::new("type") - .index(1) - .takes_value(true) - .value_parser([ - PossibleValue::new("elgamal") - ]) - .value_name("TYPE") - .required(true) - .help("The type of keypair") - ) - .arg( - Arg::new("keypair") - .index(2) - .value_name("KEYPAIR") - .takes_value(true) - .help("Filepath or URL to a keypair"), - ) - .arg( - Arg::new(SKIP_SEED_PHRASE_VALIDATION_ARG.name) - .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) - .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), - ) - ) - .subcommand( - Command::new("recover") - .about("Recover keypair from seed phrase and optional BIP39 passphrase") - .disable_version_flag(true) - .arg( - Arg::new("type") - .index(1) - .takes_value(true) - .value_parser(clap::value_parser!(KeyType)) - .value_name("TYPE") - .required(true) - .help("The type of keypair") - ) - .arg( - Arg::new("prompt_signer") - .index(2) - .value_name("KEYPAIR") - .takes_value(true) - .value_parser(SignerSourceParserBuilder::default().allow_prompt().allow_legacy().build()) - .help("`prompt:` URI scheme or `ASK` keyword"), - ) - .arg( - Arg::new("outfile") - .short('o') - .long("outfile") - .value_name("FILEPATH") - .takes_value(true) - .help("Path to generated file"), - ) - .arg( - Arg::new("force") - .long("force") - .help("Overwrite the output file if it exists"), - ) - .arg( - Arg::new(SKIP_SEED_PHRASE_VALIDATION_ARG.name) - .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) - .help(SKIP_SEED_PHRASE_VALIDATION_ARG.help), - ), - ) -} - -fn main() -> Result<(), Box> { - let 
matches = app(solana_version::version!()) - .try_get_matches() - .unwrap_or_else(|e| e.exit()); - do_main(&matches).map_err(|err| DisplayError::new_as_boxed(err).into()) -} - -fn do_main(matches: &ArgMatches) -> Result<(), Box> { - let subcommand = matches.subcommand().unwrap(); - match subcommand { - ("new", matches) => { - let key_type = matches.try_get_one::("type")?.unwrap(); - - let mut path = dirs_next::home_dir().expect("home directory"); - let outfile = if matches.try_contains_id("outfile")? { - matches.get_one::("outfile").map(|s| s.as_str()) - } else if matches.try_contains_id(NO_OUTFILE_ARG.name)? { - None - } else { - path.extend([".config", "solana", key_type.default_file_name()]); - Some(path.to_str().unwrap()) - }; - - match outfile { - Some(STDOUT_OUTFILE_TOKEN) => (), - Some(outfile) => check_for_overwrite(outfile, matches)?, - None => (), - } - - let word_count = try_get_word_count(matches)?.unwrap(); - let mnemonic_type = MnemonicType::for_word_count(word_count)?; - let language = try_get_language(matches)?.unwrap(); - - let mnemonic = Mnemonic::new(mnemonic_type, language); - let (passphrase, passphrase_message) = acquire_passphrase_and_message(matches).unwrap(); - let seed = Seed::new(&mnemonic, &passphrase); - - let silent = matches.try_contains_id("silent")?; - - match key_type { - KeyType::ElGamal => { - if !silent { - eprintln!("Generating a new ElGamal keypair"); - } - - let elgamal_keypair = ElGamalKeypair::from_seed(seed.as_bytes())?; - if let Some(outfile) = outfile { - output_encodable_key(&elgamal_keypair, outfile, "new ElGamal keypair") - .map_err(|err| format!("Unable to write {outfile}: {err}"))?; - } - - if !silent { - let phrase: &str = mnemonic.phrase(); - let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap(); - println!( - "{}\npubkey: {}\n{}\nSave this seed phrase{} to recover your new ElGamal keypair:\n{}\n{}", - ÷r, elgamal_keypair.pubkey(), ÷r, passphrase_message, phrase, ÷r - ); - } - } - KeyType::Aes128 => { 
- if !silent { - eprintln!("Generating a new AES128 encryption key"); - } - - let aes_key = AeKey::from_seed(seed.as_bytes())?; - if let Some(outfile) = outfile { - output_encodable_key(&aes_key, outfile, "new AES128 key") - .map_err(|err| format!("Unable to write {outfile}: {err}"))?; - } - - if !silent { - let phrase: &str = mnemonic.phrase(); - let divider = String::from_utf8(vec![b'='; phrase.len()]).unwrap(); - println!( - "{}\nSave this seed phrase{} to recover your new AES128 key:\n{}\n{}", - ÷r, passphrase_message, phrase, ÷r - ); - } - } - } - } - ("pubkey", matches) => { - let key_type = matches.try_get_one::("type")?.unwrap(); - let key_type = if key_type == "elgamal" { - KeyType::ElGamal - } else { - return Err("unsupported key type".into()); - }; - - let mut path = dirs_next::home_dir().expect("home directory"); - let path = if matches.try_contains_id("keypair")? { - matches.get_one::("keypair").unwrap() - } else { - path.extend([".config", "solana", key_type.default_file_name()]); - path.to_str().unwrap() - }; - - // wrap the logic inside a match statement in case more keys are supported in the - // future - match key_type { - KeyType::ElGamal => { - let elgamal_keypair = - elgamal_keypair_from_path(matches, path, "pubkey recovery", false)?; - let elgamal_pubkey = elgamal_keypair.pubkey(); - println!("{elgamal_pubkey}"); - } - _ => unreachable!(), - } - } - ("recover", matches) => { - let key_type = matches.try_get_one::("type")?.unwrap(); - - let mut path = dirs_next::home_dir().expect("home directory"); - let outfile = if matches.try_contains_id("outfile")? { - matches.get_one::("outfile").unwrap() - } else { - path.extend([".config", "solana", key_type.default_file_name()]); - path.to_str().unwrap() - }; - - if outfile != STDOUT_OUTFILE_TOKEN { - check_for_overwrite(outfile, matches)?; - } - - let name = "recover"; - match key_type { - KeyType::ElGamal => { - let keypair = if let Some(path) = - matches.try_get_one::("prompt_signer")? 
- { - elgamal_keypair_from_path(matches, path, name, true)? - } else { - let skip_validation = - matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; - elgamal_keypair_from_seed_phrase(name, skip_validation, true, None, true)? - }; - output_encodable_key(&keypair, outfile, "recovered ElGamal keypair")?; - } - KeyType::Aes128 => { - let key = if let Some(path) = matches.try_get_one::("prompt_signer")? { - ae_key_from_path(matches, path, name)? - } else { - let skip_validation = - matches.try_contains_id(SKIP_SEED_PHRASE_VALIDATION_ARG.name)?; - ae_key_from_seed_phrase(name, skip_validation, None, true)? - }; - output_encodable_key(&key, outfile, "recovered AES128 key")?; - } - } - } - _ => unreachable!(), - } - - Ok(()) -} - -#[derive(Clone)] -enum KeyType { - ElGamal, - Aes128, -} - -impl KeyType { - fn default_file_name(&self) -> &str { - match self { - KeyType::ElGamal => "elgamal.json", - KeyType::Aes128 => "aes128.json", - } - } -} - -#[derive(Debug, Error)] -#[error("unsupported key type: \"{0}\"")] -pub struct KeyTypeError(pub String); - -impl FromStr for KeyType { - type Err = KeyTypeError; - fn from_str(s: &str) -> Result { - let s = s.to_ascii_lowercase(); - match s.as_str() { - "elgamal" => Ok(Self::ElGamal), - "aes128" => Ok(Self::Aes128), - _ => Err(KeyTypeError(s)), - } - } -} - -#[cfg(test)] -mod tests { - use { - super::*, - solana_pubkey::Pubkey, - tempfile::{tempdir, TempDir}, - }; - - fn process_test_command(args: &[&str]) -> Result<(), Box> { - let solana_version = solana_version::version!(); - let app_matches = app(solana_version).get_matches_from(args); - do_main(&app_matches) - } - - fn tmp_outfile_path(out_dir: &TempDir, name: &str) -> String { - let path = out_dir.path().join(name); - path.into_os_string().into_string().unwrap() - } - - #[test] - fn test_arguments() { - let solana_version = solana_version::version!(); - - // run clap internal assert statements - app(solana_version).debug_assert(); - } - - #[test] - fn 
test_new_elgamal() { - let outfile_dir = tempdir().unwrap(); - // use `Pubkey::new_unique()` to generate names for temporary key files - let outfile_path = tmp_outfile_path(&outfile_dir, &Pubkey::new_unique().to_string()); - - // general success case - process_test_command(&[ - "solana-zk-keygen", - "new", - "elgamal", - "--outfile", - &outfile_path, - "--no-bip39-passphrase", - ]) - .unwrap(); - - // refuse to overwrite file - let result = process_test_command(&[ - "solana-zk-keygen", - "new", - "elgamal", - "--outfile", - &outfile_path, - "--no-bip39-passphrase", - ]) - .unwrap_err() - .to_string(); - - let expected = format!("Refusing to overwrite {outfile_path} without --force flag"); - assert_eq!(result, expected); - - // no outfile - process_test_command(&[ - "solana-keygen", - "new", - "elgamal", - "--no-bip39-passphrase", - "--no-outfile", - ]) - .unwrap(); - } - - #[test] - fn test_new_aes128() { - let outfile_dir = tempdir().unwrap(); - // use `Pubkey::new_unique()` to generate names for temporary key files - let outfile_path = tmp_outfile_path(&outfile_dir, &Pubkey::new_unique().to_string()); - - // general success case - process_test_command(&[ - "solana-zk-keygen", - "new", - "aes128", - "--outfile", - &outfile_path, - "--no-bip39-passphrase", - ]) - .unwrap(); - - // refuse to overwrite file - let result = process_test_command(&[ - "solana-zk-keygen", - "new", - "aes128", - "--outfile", - &outfile_path, - "--no-bip39-passphrase", - ]) - .unwrap_err() - .to_string(); - - let expected = format!("Refusing to overwrite {outfile_path} without --force flag"); - assert_eq!(result, expected); - - // no outfile - process_test_command(&[ - "solana-keygen", - "new", - "aes128", - "--no-bip39-passphrase", - "--no-outfile", - ]) - .unwrap(); - } - - #[test] - fn test_pubkey() { - let keypair_out_dir = tempdir().unwrap(); - // use `Pubkey::new_unique()` to generate names for temporary key files - let keypair_path = tmp_outfile_path(&keypair_out_dir, 
&Pubkey::new_unique().to_string()); - - let keypair = ElGamalKeypair::new_rand(); - keypair.write_to_file(&keypair_path).unwrap(); - - process_test_command(&["solana-keygen", "pubkey", "elgamal", &keypair_path]).unwrap(); - } -} diff --git a/zk-token-sdk/README.md b/zk-token-sdk/README.md new file mode 100644 index 00000000000000..76948bb8c9476f --- /dev/null +++ b/zk-token-sdk/README.md @@ -0,0 +1,7 @@ +# zk-token-sdk (DEPRECATED) + +**This crate is deprecated and no longer maintained.** + +This crate has been replaced by the [zk-sdk](https://github.com/solana-program/zk-elgamal-proof) crate. + +For the latest updates and features, please use the new crate. diff --git a/zk-token-sdk/src/instruction/fee_sigma.rs b/zk-token-sdk/src/instruction/fee_sigma.rs index adddef5f64fc9d..a0c0161653d338 100644 --- a/zk-token-sdk/src/instruction/fee_sigma.rs +++ b/zk-token-sdk/src/instruction/fee_sigma.rs @@ -6,7 +6,7 @@ //! A formal documentation of how transfer fees and fee sigma proof are computed can be found in //! the [`ZK Token proof`] program documentation. //! -//! [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof +//! [`ZK Token proof`]: https://docs.anza.xyz/runtime/zk-elgamal-proof #[cfg(not(target_os = "solana"))] use { @@ -43,7 +43,7 @@ pub struct FeeSigmaProofData { /// /// We refer to [`ZK Token proof`] for the formal details on how the fee sigma proof is computed. /// -/// [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof +/// [`ZK Token proof`]: https://docs.anza.xyz/runtime/zk-elgamal-proof #[derive(Clone, Copy, Pod, Zeroable)] #[repr(C)] pub struct FeeSigmaProofContext { diff --git a/zk-token-sdk/src/instruction/mod.rs b/zk-token-sdk/src/instruction/mod.rs index 7512638816065f..d78b0f1623e4c6 100644 --- a/zk-token-sdk/src/instruction/mod.rs +++ b/zk-token-sdk/src/instruction/mod.rs @@ -1,6 +1,6 @@ //! The instruction data types for the [`ZK Token proof`] instruction. //! -//! 
[`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof +//! [`ZK Token proof`]: https://docs.anza.xyz/runtime/zk-elgamal-proof pub mod batched_grouped_ciphertext_validity; pub mod batched_range_proof; diff --git a/zk-token-sdk/src/lib.rs b/zk-token-sdk/src/lib.rs index 83d8b188366e7d..cd9467a0a143d4 100644 --- a/zk-token-sdk/src/lib.rs +++ b/zk-token-sdk/src/lib.rs @@ -1,3 +1,10 @@ +// Deprecate the crate +#![deprecated( + since = "3.0.0", + note = "use the `solana-zk-sdk` instead: https://github.com/solana-program/zk-elgamal-proof/tree/main/zk-sdk" +)] +// Allow deprecated warnings to be suppressed in the crate +#![allow(deprecated)] #![allow(clippy::arithmetic_side_effects, clippy::op_ref)] // The warning `clippy::op_ref` is disabled to allow efficient operator arithmetic of structs that diff --git a/zk-token-sdk/src/sigma_proofs/fee_proof.rs b/zk-token-sdk/src/sigma_proofs/fee_proof.rs index b847a5b955d5ac..ac9751646be691 100644 --- a/zk-token-sdk/src/sigma_proofs/fee_proof.rs +++ b/zk-token-sdk/src/sigma_proofs/fee_proof.rs @@ -8,7 +8,7 @@ //! The protocol guarantees computational soundness (by the hardness of discrete log) and perfect //! zero-knowledge in the random oracle model. //! -//! [`ZK Token proof program`]: https://docs.solanalabs.com/runtime/zk-token-proof +//! [`ZK Token proof program`]: https://docs.anza.xyz/runtime/zk-elgamal-proof #[cfg(not(target_os = "solana"))] use { diff --git a/zk-token-sdk/src/sigma_proofs/mod.rs b/zk-token-sdk/src/sigma_proofs/mod.rs index 38a6dde20816ca..a07bfd30d7b154 100644 --- a/zk-token-sdk/src/sigma_proofs/mod.rs +++ b/zk-token-sdk/src/sigma_proofs/mod.rs @@ -3,7 +3,7 @@ //! Formal documentation and security proofs for the sigma proofs in this module can be found in //! [`ZK Token proof`] program documentation. //! -//! [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof +//! 
[`ZK Token proof`]: https://docs.anza.xyz/runtime/zk-elgamal-proof pub mod errors; diff --git a/zk-token-sdk/src/zk_token_proof_instruction.rs b/zk-token-sdk/src/zk_token_proof_instruction.rs index 1d25a8bbb91897..e8f7c2aef1ef06 100644 --- a/zk-token-sdk/src/zk_token_proof_instruction.rs +++ b/zk-token-sdk/src/zk_token_proof_instruction.rs @@ -29,8 +29,8 @@ //! this instruction must be signed by the context account's owner. This instruction can be used by //! the account owner to reclaim lamports for storage. //! -//! [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof -//! [`context-state`]: https://docs.solanalabs.com/runtime/zk-token-proof#context-data +//! [`ZK Token proof`]: https://docs.anza.xyz/runtime/zk-elgamal-proof +//! [`context-state`]: https://docs.anza.xyz/runtime/zk-elgamal-proof#context-data pub use crate::instruction::*; use { diff --git a/zk-token-sdk/src/zk_token_proof_program.rs b/zk-token-sdk/src/zk_token_proof_program.rs index 1a2cdc29d1b7f9..dd3bab2750ffaa 100644 --- a/zk-token-sdk/src/zk_token_proof_program.rs +++ b/zk-token-sdk/src/zk_token_proof_program.rs @@ -5,7 +5,7 @@ //! the program as well as the technical details of some of the proof instructions can be found in //! the [`ZK Token proof`] documentation. //! -//! [`ZK Token proof`]: https://docs.solanalabs.com/runtime/zk-token-proof +//! [`ZK Token proof`]: https://docs.anza.xyz/runtime/zk-elgamal-proof // Program Id of the ZkToken Proof program pub use solana_sdk_ids::zk_token_proof_program::{check_id, id, ID};