diff --git a/.config/nextest.toml b/.config/nextest.toml
index 381428729ddeff..082fee67c02ee7 100644
--- a/.config/nextest.toml
+++ b/.config/nextest.toml
@@ -1,6 +1,9 @@
[store]
dir = "target/nextest"
+[test-groups]
+build-sbf = { max-threads = 1 }
+
[profile.ci]
failure-output = "immediate-final"
slow-timeout = { period = "60s", terminate-after = 1 }
@@ -25,13 +28,17 @@ threads-required = "num-cpus"
filter = "package(solana-gossip) & test(/^test_star_network_push_ring_200/)"
threads-required = "num-cpus"
+[[profile.ci.overrides]]
+filter = "package(solana-gossip) & test(/^gossip_ring/)"
+threads-required = "num-cpus"
+
[[profile.ci.overrides]]
filter = "package(solana-gossip) & test(/^cluster_info::tests::new_with_external_ip_test_random/)"
threads-required = "num-cpus"
[[profile.ci.overrides]]
filter = "package(solana-cargo-build-sbf)"
-threads-required = "num-cpus"
+test-group = "build-sbf"
[[profile.ci.overrides]]
filter = 'package(solana-local-cluster) & test(/^test_kill_partition_switch_threshold_progress$/)'
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 74c0219a170d37..18221ff8244373 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,16 +1,21 @@
-# The SVM team is in the process of migrating these subdirectories to a new
-# repo and would like to avoid introducing dependencies in the meantime.
+# Please keep this file sorted
+/bloom/ @anza-xyz/networking
/compute-budget-instruction/ @anza-xyz/fees
+/core/src/repair/ @anza-xyz/networking
/fee/ @anza-xyz/fees
+/gossip/ @anza-xyz/networking
/log-collector/ @anza-xyz/svm
+/net-utils/ @anza-xyz/networking
/program-runtime/ @anza-xyz/svm
/programs/bpf_loader/ @anza-xyz/svm
/programs/loader-v4/ @anza-xyz/svm
/runtime-transaction/ @anza-xyz/tx-metadata
+/svm-callback/ @anza-xyz/svm
/svm-conformance/ @anza-xyz/svm
/svm-transaction/ @anza-xyz/svm
/svm/ @anza-xyz/svm
-/svm/examples/Cargo.lock
-/svm-callback/ @anza-xyz/svm
+/tls-utils/ @anza-xyz/networking
/transaction-context/ @anza-xyz/svm
/transaction-view/ @anza-xyz/tx-metadata
+/turbine/ @anza-xyz/networking
+/xdp/ @anza-xyz/networking
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 95e3fb34445ee0..d4a244f1711830 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -5,12 +5,17 @@
version: 2
updates:
-- package-ecosystem: cargo
- directory: "/"
- schedule:
- interval: daily
- time: "01:00"
- timezone: America/Los_Angeles
- #labels:
- # - "automerge"
- open-pull-requests-limit: 6
+ - package-ecosystem: cargo
+ directory: "/"
+ schedule:
+ interval: daily
+ time: "01:00"
+ timezone: America/Los_Angeles
+ open-pull-requests-limit: 6
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: daily
+ time: "01:00"
+ timezone: America/Los_Angeles
+ open-pull-requests-limit: 3
diff --git a/.github/label-actions.yml b/.github/label-actions.yml
index 029ec96a2dc9fc..8a67d20c3de3ce 100644
--- a/.github/label-actions.yml
+++ b/.github/label-actions.yml
@@ -16,11 +16,11 @@ question:
this is a bug with Solana itself, please post your question to the Solana Stack Exchange
using this link: https://solana.stackexchange.com/questions/ask
-
+
---
_This
- [automated message](https://github.com/solana-labs/solana/blob/master/.github/label-actions.yml)
+ [automated message](https://github.com/anza-xyz/agave/blob/master/.github/label-actions.yml)
is a result of having added the ‘question’ tag_.
# Close the issue
diff --git a/.github/workflows/add-team-to-ghsa.yml b/.github/workflows/add-team-to-ghsa.yml
index 5e5f2f70881050..3a66048b732103 100644
--- a/.github/workflows/add-team-to-ghsa.yml
+++ b/.github/workflows/add-team-to-ghsa.yml
@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-24.04
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
ref: master
- name: Run script
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index 2cf9a5895ff347..0078e9c6788874 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -69,7 +69,7 @@ jobs:
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Before Command
if: ${{ matrix.test.before_command != '' }}
diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml
index 2a8b94ad2dd376..c4b3a7b644125a 100644
--- a/.github/workflows/cargo.yml
+++ b/.github/workflows/cargo.yml
@@ -54,7 +54,7 @@ jobs:
apk update
apk add bash git
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- uses: mozilla-actions/sccache-action@v0.0.9
with:
diff --git a/.github/workflows/changelog-label.yml b/.github/workflows/changelog-label.yml
index ffd8ec21033ef8..30c77cea8a2a13 100644
--- a/.github/workflows/changelog-label.yml
+++ b/.github/workflows/changelog-label.yml
@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Check if changes to CHANGELOG.md
diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml
index 1b875fed808a9d..a3ef05860a21f2 100644
--- a/.github/workflows/client-targets.yml
+++ b/.github/workflows/client-targets.yml
@@ -31,7 +31,7 @@ jobs:
- armv7-linux-androideabi
runs-on: ${{ matrix.os }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
# This can be removed once cargo-ndk >= 3.5.4 is used.
- name: Setup environment for Android NDK
@@ -61,7 +61,7 @@ jobs:
- x86_64-apple-darwin
runs-on: ${{ matrix.os }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Setup Rust
run: |
diff --git a/.github/workflows/crate-check.yml b/.github/workflows/crate-check.yml
index c8bbcf79a26496..8ed80637fd7617 100644
--- a/.github/workflows/crate-check.yml
+++ b/.github/workflows/crate-check.yml
@@ -18,7 +18,7 @@ jobs:
if: github.repository == 'anza-xyz/agave'
runs-on: ubuntu-22.04
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
with:
fetch-depth: 0
diff --git a/.github/workflows/dependabot-pr.yml b/.github/workflows/dependabot-pr.yml
index 70f7e939e35f25..1b06354332045e 100644
--- a/.github/workflows/dependabot-pr.yml
+++ b/.github/workflows/dependabot-pr.yml
@@ -12,7 +12,7 @@ jobs:
if: github.triggering_actor == 'dependabot[bot]'
steps:
- name: checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
ref: ${{ github.event.pull_request.head.ref }}
token: ${{ secrets.PAT }}
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index f6bc0049614d05..26e456a8c7b45b 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
fetch-depth: 0
@@ -77,12 +77,12 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Setup Node
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v5
with:
- node-version: 22
+ node-version: 24
- name: Build
working-directory: docs
diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml
index 08afd74097e035..c50ae9d4c3d75a 100644
--- a/.github/workflows/downstream-project-anchor.yml
+++ b/.github/workflows/downstream-project-anchor.yml
@@ -45,7 +45,7 @@ jobs:
matrix:
version: ["master"]
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- shell: bash
run: |
diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml
index 3d4887cf817804..a9e3c667356ca6 100644
--- a/.github/workflows/downstream-project-spl.yml
+++ b/.github/workflows/downstream-project-spl.yml
@@ -38,7 +38,7 @@ env:
jobs:
check:
- if: github.repository == 'anza-xyz/agave'
+ #if: github.repository == 'anza-xyz/agave'
if: false
runs-on: ubuntu-latest
timeout-minutes: 60
@@ -57,7 +57,7 @@ jobs:
# re-enable with https://github.com/buffalojoec/mollusk/pull/74
# - token
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- shell: bash
run: |
@@ -77,7 +77,7 @@ jobs:
cargo check
test_cli:
- if: github.repository == 'anza-xyz/agave'
+ #if: github.repository == 'anza-xyz/agave'
if: false
runs-on: ubuntu-latest
timeout-minutes: 60
@@ -87,7 +87,7 @@ jobs:
- single-pool
- token-2022
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- shell: bash
run: |
@@ -108,7 +108,7 @@ jobs:
cargo test --manifest-path clients/cli/Cargo.toml
cargo-test-sbf:
- if: github.repository == 'anza-xyz/agave'
+ #if: github.repository == 'anza-xyz/agave'
if: false
runs-on: ubuntu-latest
timeout-minutes: 60
@@ -127,7 +127,7 @@ jobs:
# re-enable with https://github.com/buffalojoec/mollusk/pull/74
# - token
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- shell: bash
run: |
diff --git a/.github/workflows/label-actions.yml b/.github/workflows/label-actions.yml
new file mode 100644
index 00000000000000..7ccac26d559641
--- /dev/null
+++ b/.github/workflows/label-actions.yml
@@ -0,0 +1,15 @@
+name: "Issue Label Actions"
+
+on:
+ issues:
+ types: [labeled, unlabeled]
+
+permissions:
+ contents: read
+ issues: write
+
+jobs:
+ action:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: dessant/label-actions@v4
diff --git a/.github/workflows/publish-windows-tarball.yml b/.github/workflows/publish-windows-tarball.yml
index 42d93bee978f1a..6b92e1abb58d33 100644
--- a/.github/workflows/publish-windows-tarball.yml
+++ b/.github/workflows/publish-windows-tarball.yml
@@ -16,7 +16,7 @@ jobs:
channel: ${{ steps.build.outputs.channel }}
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
ref: master
fetch-depth: 0
@@ -85,7 +85,7 @@ jobs:
path: ./windows-release
- name: Setup crediential
- uses: "google-github-actions/auth@v2"
+ uses: "google-github-actions/auth@v3"
with:
credentials_json: "${{ secrets.GCS_RELEASE_BUCKET_WRITER_CREDIENTIAL }}"
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 2a2cae1184d30c..18e6f2767f339f 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Trigger a Buildkite Build
- uses: "buildkite/trigger-pipeline-action@v2.2.0"
+ uses: "buildkite/trigger-pipeline-action@v2.3.0"
with:
buildkite_api_access_token: ${{ secrets.TRIGGER_BK_BUILD_TOKEN }}
pipeline: "anza/agave-secondary"
@@ -25,7 +25,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Create Release
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
@@ -44,7 +44,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
with:
fetch-depth: 0
token: ${{ secrets.VERSION_BUMP_PAT }}
@@ -80,7 +80,7 @@ jobs:
git push origin version-bump-$next_version
- name: Create PR
- uses: actions/github-script@v7
+ uses: actions/github-script@v8
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
diff --git a/.github/workflows/svm-examples.yml b/.github/workflows/svm-examples.yml
deleted file mode 100644
index d31007e53b42f8..00000000000000
--- a/.github/workflows/svm-examples.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: SVM examples test
-
-on:
- push:
- branches:
- - master
- - v[0-9]+.[0-9]+
- pull_request:
- branches:
- - master
- - v[0-9]+.[0-9]+
- paths:
- - "**.rs"
- - "Cargo.toml"
- - "Cargo.lock"
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
- cancel-in-progress: true
-
-env:
- SHELL: /bin/bash
- SCCACHE_GHA_ENABLED: "true"
- RUSTC_WRAPPER: "sccache"
-
-jobs:
- test:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
-
- - shell: bash
- run: |
- .github/scripts/purge-ubuntu-runner.sh
-
- - uses: mozilla-actions/sccache-action@v0.0.9
- with:
- version: "v0.10.0"
-
- - shell: bash
- run: |
- source .github/scripts/downstream-project-spl-install-deps.sh
-
- - name: Run build
- run: |
- cd svm/examples
- cargo build
-
- - name: Run tests
- run: |
- cd svm/examples
- cargo test
\ No newline at end of file
diff --git a/.github/workflows/verify-packets.yml b/.github/workflows/verify-packets.yml
index 56e0d49620adf7..22a62c24d63635 100644
--- a/.github/workflows/verify-packets.yml
+++ b/.github/workflows/verify-packets.yml
@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v5
- name: Install required packages
run: |
diff --git a/.mergify.yml b/.mergify.yml
index eaea8b75178b41..0c1c5ab8e7b4ea 100644
--- a/.mergify.yml
+++ b/.mergify.yml
@@ -104,9 +104,9 @@ pull_request_rules:
- automerge
comment:
message: automerge label removed due to a CI failure
- - name: v2.2 feature-gate backport
+ - name: v2.3 feature-gate backport
conditions:
- - label=v2.2
+ - label=v2.3
- label=feature-gate
actions:
backport:
@@ -117,10 +117,10 @@ pull_request_rules:
labels:
- feature-gate
branches:
- - v2.2
- - name: v2.2 non-feature-gate backport
+ - v2.3
+ - name: v2.3 non-feature-gate backport
conditions:
- - label=v2.2
+ - label=v2.3
- label!=feature-gate
actions:
backport:
@@ -128,10 +128,10 @@ pull_request_rules:
title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})"
ignore_conflicts: true
branches:
- - v2.2
- - name: v2.2 backport warning comment
+ - v2.3
+ - name: v2.3 backport warning comment
conditions:
- - label=v2.2
+ - label=v2.3
actions:
comment:
message: >
@@ -142,9 +142,9 @@ pull_request_rules:
refactoring, plumbing, cleanup, etc that are not strictly
necessary to achieve the goal. Any of the latter should go only
into master and ride the normal stabilization schedule.
- - name: v2.3 feature-gate backport
+ - name: v3.0 feature-gate backport
conditions:
- - label=v2.3
+ - label=v3.0
- label=feature-gate
actions:
backport:
@@ -154,10 +154,10 @@ pull_request_rules:
labels:
- feature-gate
branches:
- - v2.3
- - name: v2.3 non-feature-gate backport
+ - v3.0
+ - name: v3.0 non-feature-gate backport
conditions:
- - label=v2.3
+ - label=v3.0
- label!=feature-gate
actions:
backport:
@@ -165,10 +165,10 @@ pull_request_rules:
title: "{{ destination_branch }}: {{ title }} (backport of #{{ number }})"
ignore_conflicts: true
branches:
- - v2.3
- - name: v2.3 backport warning comment
+ - v3.0
+ - name: v3.0 backport warning comment
conditions:
- - label=v2.3
+ - label=v3.0
actions:
comment:
message: >
@@ -217,20 +217,16 @@ pull_request_rules:
If this PR represents a change to a native program implementation (not
tests), please include a reviewer from the Firedancer team. And please
keep refactors to a minimum.
- - name: Notify about future move of zk-keygen, zk-sdk, and zk-token-sdk
+ - name: Notify about the deprecation of zk-token-sdk
conditions:
- or:
- - files~=^zk-keygen/
- - files~=^zk-sdk/
- files~=^zk-token-sdk/
actions:
comment:
message: |
- For your information, the `zk-keygen` and `zk-sdk` directories are
- scheduled to be relocated to `solana-program/zk-elgamal-proof` in a
- separate repository. Additionally, the `zk-token-sdk` directory will
- be removed. Please take these upcoming changes into account when
- making modifications.
+ For your information, the `solana-zk-token-sdk` is deprecated, and this
+ directory will be removed in a future version. Please take this in mind
+ when making modifications.
commands_restrictions:
# The author of copied PRs is the Mergify user.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8ccdbc4a78c3b2..ed790de50154d5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,15 +5,24 @@ All notable changes to this project will be documented in this file.
Please follow the [guidance](#adding-to-this-changelog) at the bottom of this file when making changes
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html)
-and follows a [Backwards Compatibility Policy](https://docs.solanalabs.com/backwards-compatibility)
+and follows a [Backwards Compatibility Policy](https://docs.anza.xyz/backwards-compatibility)
Release channels have their own copy of this changelog:
-* [edge - v3.0](#edge-channel)
-* [beta - v2.3](https://github.com/anza-xyz/agave/blob/v2.3/CHANGELOG.md)
-* [stable - v2.2](https://github.com/anza-xyz/agave/blob/v2.2/CHANGELOG.md)
+* [edge - v3.1](#edge-channel)
+* [beta - v3.0](https://github.com/anza-xyz/agave/blob/v3.0/CHANGELOG.md)
+* [stable - v2.3](https://github.com/anza-xyz/agave/blob/v2.3/CHANGELOG.md)
-## 3.0.0 - Unreleased
+## 3.1.0 - Unreleased
+### RPC
+#### Breaking
+#### Changes
+### Validator
+#### Breaking
+#### Deprecations
+* The `--monitor` flag with `agave-validator exit` is now deprecated. Operators can use the `monitor` command after `exit` instead.
+
+## 3.0.0
### RPC
@@ -40,10 +49,14 @@ Release channels have their own copy of this changelog:
* `--skip-poh-verify`
* Deprecated snapshot archive formats have been removed and are no longer loadable.
* Using `--snapshot-interval-slots 0` to disable generating snapshots has been removed. Use `--no-snapshots` instead.
+* Validator will now bind all ports within provided `--dynamic-port-range`, including the client ports. A range of at least 25 ports is recommended to avoid failures to bind during startup.
+* Agave and agave-ledger-tool can no longer operate with legacy shreds. Legacy shreds have not been in circulation since the activation of https://explorer.solana.com/address/GV49KKQdBNaiv2pgqhS2Dy3GWYJGXMTVYbYkdk91orRy. This change may break operations with old ledgers that may still contain legacy shreds.
#### Changes
* `--transaction-structure view` is now the default.
* The default full snapshot interval is now 100,000 slots.
+* `SOLANA_BANKING_THREADS` environment variable is no longer supported. Use `--block-production-num-workers` instead.
+* By default, `agave-validator exit` will now wait for the validator process to terminate before returning. The `--wait-for-exit` flag has been deprecated, but operators can still opt out with the new `--no-wait-for-exit` flag.
## 2.3.0
diff --git a/Cargo.lock b/Cargo.lock
index 09b1776bc16672..a9a5ae57c1d373 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -43,7 +43,7 @@ version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"cipher",
"cpufeatures",
]
@@ -69,7 +69,7 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "763e484feceb7dd021b21c5c6f81aee06b1594a743455ec7efbf72e6355e447b"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"errno",
"libc",
"num_cpus",
@@ -77,7 +77,7 @@ dependencies = [
[[package]]
name = "agave-banking-stage-ingress-types"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"crossbeam-channel",
"solana-perf",
@@ -85,7 +85,7 @@ dependencies = [
[[package]]
name = "agave-cargo-registry"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"clap 2.33.3",
"flate2",
@@ -117,7 +117,7 @@ dependencies = [
[[package]]
name = "agave-feature-set"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"ahash 0.8.11",
"solana-epoch-schedule",
@@ -131,7 +131,7 @@ dependencies = [
[[package]]
name = "agave-geyser-plugin-interface"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"log",
"solana-clock",
@@ -139,34 +139,33 @@ dependencies = [
"solana-signature",
"solana-transaction",
"solana-transaction-status",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "agave-install"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"atty",
"bincode",
"bzip2",
"chrono",
"clap 2.33.3",
- "console 0.16.0",
+ "console 0.16.1",
"crossbeam-channel",
"ctrlc",
"dirs-next",
"indicatif 0.18.0",
"nix",
- "reqwest 0.12.22",
+ "reqwest 0.12.23",
"scopeguard",
- "semver 1.0.26",
+ "semver 1.0.27",
"serde",
"serde_derive",
"serde_yaml 0.8.26",
"serde_yaml 0.9.34+deprecated",
"solana-clap-utils",
"solana-config-interface",
- "solana-config-program-client",
"solana-hash",
"solana-keypair",
"solana-logger",
@@ -180,14 +179,14 @@ dependencies = [
"solana-version",
"tar",
"tempfile",
- "url 2.5.4",
+ "url 2.5.7",
"winapi 0.3.9",
"winreg",
]
[[package]]
name = "agave-io-uring"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"io-uring",
"libc",
@@ -198,7 +197,7 @@ dependencies = [
[[package]]
name = "agave-ledger-tool"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"agave-reserved-account-keys",
@@ -230,6 +229,7 @@ dependencies = [
"solana-clap-utils",
"solana-cli-output",
"solana-clock",
+ "solana-cluster-type",
"solana-compute-budget",
"solana-core",
"solana-cost-model",
@@ -244,7 +244,6 @@ dependencies = [
"solana-keypair",
"solana-ledger",
"solana-loader-v3-interface",
- "solana-log-collector",
"solana-logger",
"solana-measure",
"solana-message",
@@ -265,29 +264,34 @@ dependencies = [
"solana-streamer",
"solana-svm-callback",
"solana-svm-feature-set",
+ "solana-svm-log-collector",
+ "solana-svm-type-overrides",
"solana-system-interface",
"solana-transaction",
"solana-transaction-context",
"solana-transaction-status",
- "solana-type-overrides",
"solana-unified-scheduler-pool",
"solana-version",
"solana-vote",
"solana-vote-program",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tikv-jemallocator",
"tokio",
]
+[[package]]
+name = "agave-low-pass-filter"
+version = "3.1.0"
+
[[package]]
name = "agave-precompiles"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"bincode",
"bytemuck",
"digest 0.10.7",
- "ed25519-dalek",
+ "ed25519-dalek 1.0.1",
"hex",
"libsecp256k1",
"openssl",
@@ -307,7 +311,7 @@ dependencies = [
[[package]]
name = "agave-reserved-account-keys"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"solana-frozen-abi",
@@ -318,9 +322,13 @@ dependencies = [
"solana-sysvar",
]
+[[package]]
+name = "agave-scheduler-bindings"
+version = "3.1.0"
+
[[package]]
name = "agave-store-histogram"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"clap 2.33.3",
"solana-version",
@@ -328,7 +336,7 @@ dependencies = [
[[package]]
name = "agave-store-tool"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"ahash 0.8.11",
"clap 2.33.3",
@@ -342,7 +350,7 @@ dependencies = [
[[package]]
name = "agave-syscalls"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"bincode",
@@ -355,7 +363,7 @@ dependencies = [
"solana-bn254",
"solana-clock",
"solana-cpi",
- "solana-curve25519 3.0.0",
+ "solana-curve25519 3.1.0",
"solana-epoch-rewards",
"solana-epoch-schedule",
"solana-fee-calculator",
@@ -364,8 +372,6 @@ dependencies = [
"solana-keccak-hasher",
"solana-last-restart-slot",
"solana-loader-v3-interface",
- "solana-log-collector",
- "solana-measure",
"solana-poseidon",
"solana-program",
"solana-program-entrypoint",
@@ -378,27 +384,31 @@ dependencies = [
"solana-sha256-hasher",
"solana-slot-hashes",
"solana-stable-layout",
+ "solana-stake-interface",
"solana-svm-callback",
"solana-svm-feature-set",
+ "solana-svm-log-collector",
+ "solana-svm-measure",
+ "solana-svm-timings",
+ "solana-svm-type-overrides",
"solana-sysvar",
"solana-sysvar-id",
- "solana-timings",
"solana-transaction-context",
- "solana-type-overrides",
+ "static_assertions",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "agave-thread-manager"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"affinity",
"agave-thread-manager",
"anyhow",
"axum 0.7.9",
- "cfg-if 1.0.1",
- "env_logger 0.11.8",
+ "cfg-if 1.0.3",
+ "env_logger",
"hyper 0.14.32",
"log",
"num_cpus",
@@ -414,7 +424,7 @@ dependencies = [
[[package]]
name = "agave-transaction-view"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-transaction-view",
"bincode",
@@ -436,13 +446,13 @@ dependencies = [
[[package]]
name = "agave-validator"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-geyser-plugin-interface",
"assert_cmd",
"chrono",
"clap 2.33.3",
- "console 0.16.0",
+ "console 0.16.1",
"core_affinity",
"crossbeam-channel",
"fd-lock",
@@ -458,8 +468,10 @@ dependencies = [
"num_cpus",
"predicates",
"pretty_assertions",
+ "qualifier_attr",
"rand 0.8.5",
"rayon",
+ "scopeguard",
"serde",
"serde_json",
"serde_yaml 0.9.34+deprecated",
@@ -520,23 +532,69 @@ dependencies = [
"symlink",
"tempfile",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tikv-jemallocator",
"tokio",
]
[[package]]
name = "agave-verified-packet-receiver"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"solana-perf",
"solana-streamer",
]
+[[package]]
+name = "agave-votor"
+version = "3.1.0"
+dependencies = [
+ "anyhow",
+ "bincode",
+ "bitvec",
+ "bs58",
+ "crossbeam-channel",
+ "dashmap",
+ "itertools 0.12.1",
+ "log",
+ "parking_lot 0.12.3",
+ "qualifier_attr",
+ "rayon",
+ "serde",
+ "serde_bytes",
+ "serde_derive",
+ "solana-accounts-db",
+ "solana-bloom",
+ "solana-bls-signatures",
+ "solana-clock",
+ "solana-entry",
+ "solana-epoch-schedule",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
+ "solana-gossip",
+ "solana-hash",
+ "solana-keypair",
+ "solana-ledger",
+ "solana-logger",
+ "solana-measure",
+ "solana-metrics",
+ "solana-pubkey",
+ "solana-rpc",
+ "solana-runtime",
+ "solana-signature",
+ "solana-signer",
+ "solana-signer-store",
+ "solana-time-utils",
+ "solana-transaction",
+ "solana-votor-messages",
+ "test-case",
+ "thiserror 2.0.16",
+]
+
[[package]]
name = "agave-watchtower"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"clap 2.33.3",
"humantime",
@@ -557,14 +615,14 @@ dependencies = [
[[package]]
name = "agave-xdp"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"aya",
"caps",
"crossbeam-channel",
"libc",
"log",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
@@ -584,7 +642,7 @@ version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"getrandom 0.2.15",
"once_cell",
"version_check",
@@ -630,12 +688,6 @@ version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
-[[package]]
-name = "android-tzdata"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
-
[[package]]
name = "android_system_properties"
version = "0.1.4"
@@ -711,9 +763,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.98"
+version = "1.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487"
+checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"
[[package]]
name = "aquamarine"
@@ -726,14 +778,14 @@ dependencies = [
"proc-macro-error2",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "arbitrary"
-version = "1.4.1"
+version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223"
+checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1"
dependencies = [
"derive_arbitrary",
]
@@ -944,17 +996,6 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfdc70193dadb9d7287fa4b633f15f90c876915b31f6af17da307fc59c9859a8"
-[[package]]
-name = "async-channel"
-version = "1.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35"
-dependencies = [
- "concurrent-queue",
- "event-listener 2.5.2",
- "futures-core",
-]
-
[[package]]
name = "async-compression"
version = "0.4.1"
@@ -975,7 +1016,7 @@ version = "3.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc"
dependencies = [
- "event-listener 5.3.1",
+ "event-listener",
"event-listener-strategy",
"pin-project-lite",
]
@@ -1003,13 +1044,13 @@ dependencies = [
[[package]]
name = "async-trait"
-version = "0.1.88"
+version = "0.1.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5"
+checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -1056,7 +1097,7 @@ dependencies = [
"matchit",
"memchr",
"mime",
- "percent-encoding 2.3.1",
+ "percent-encoding 2.3.2",
"pin-project-lite",
"rustversion",
"serde",
@@ -1085,7 +1126,7 @@ dependencies = [
"matchit",
"memchr",
"mime",
- "percent-encoding 2.3.1",
+ "percent-encoding 2.3.2",
"pin-project-lite",
"rustversion",
"serde",
@@ -1146,7 +1187,7 @@ checksum = "d18bc4e506fbb85ab7392ed993a7db4d1a452c71b75a246af4a80ab8c9d2dd50"
dependencies = [
"assert_matches",
"aya-obj",
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"bytes",
"libc",
"log",
@@ -1191,13 +1232,19 @@ checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12"
dependencies = [
"addr2line",
"cc",
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"libc",
"miniz_oxide",
"object 0.31.1",
"rustc-demangle",
]
+[[package]]
+name = "base16ct"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"
+
[[package]]
name = "base64"
version = "0.12.3"
@@ -1222,6 +1269,12 @@ version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+[[package]]
+name = "base64ct"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba"
+
[[package]]
name = "bencher"
version = "0.1.5"
@@ -1243,7 +1296,7 @@ version = "0.69.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0"
dependencies = [
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"cexpr",
"clang-sys",
"itertools 0.12.1",
@@ -1254,7 +1307,7 @@ dependencies = [
"regex",
"rustc-hash 1.1.0",
"shlex",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -1280,9 +1333,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
-version = "2.9.1"
+version = "2.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"
+checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
dependencies = [
"serde",
]
@@ -1304,6 +1357,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
dependencies = [
"funty",
"radium",
+ "serde",
"tap",
"wyz",
]
@@ -1317,7 +1371,7 @@ dependencies = [
"arrayref",
"arrayvec",
"cc",
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"constant_time_eq",
"digest 0.10.7",
]
@@ -1362,36 +1416,41 @@ dependencies = [
]
[[package]]
-name = "borsh"
-version = "0.10.3"
+name = "blst"
+version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b"
+checksum = "4fd49896f12ac9b6dcd7a5998466b9b58263a695a3dd1ecc1aaca2e12a90b080"
dependencies = [
- "borsh-derive 0.10.3",
- "hashbrown 0.12.3",
+ "cc",
+ "glob",
+ "threadpool",
+ "zeroize",
]
[[package]]
-name = "borsh"
-version = "1.5.7"
+name = "blstrs"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce"
+checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29"
dependencies = [
- "borsh-derive 1.5.7",
- "cfg_aliases",
+ "blst",
+ "byte-slice-cast",
+ "ff",
+ "group",
+ "pairing",
+ "rand_core 0.6.4",
+ "serde",
+ "subtle",
]
[[package]]
-name = "borsh-derive"
-version = "0.10.3"
+name = "borsh"
+version = "1.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7"
+checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce"
dependencies = [
- "borsh-derive-internal",
- "borsh-schema-derive-internal",
- "proc-macro-crate 0.1.5",
- "proc-macro2",
- "syn 1.0.109",
+ "borsh-derive",
+ "cfg_aliases",
]
[[package]]
@@ -1404,29 +1463,7 @@ dependencies = [
"proc-macro-crate 3.1.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
-]
-
-[[package]]
-name = "borsh-derive-internal"
-version = "0.10.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "borsh-schema-derive-internal"
-version = "0.10.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 1.0.109",
+ "syn 2.0.106",
]
[[package]]
@@ -1503,39 +1540,35 @@ dependencies = [
]
[[package]]
-name = "byte-tools"
-version = "0.3.1"
+name = "byte-slice-cast"
+version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"
+checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d"
[[package]]
-name = "byte-unit"
-version = "4.0.19"
+name = "byte-tools"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da78b32057b8fdfc352504708feeba7216dcd65a2c9ab02978cbd288d1279b6c"
-dependencies = [
- "serde",
- "utf8-width",
-]
+checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"
[[package]]
name = "bytemuck"
-version = "1.23.1"
+version = "1.23.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422"
+checksum = "3995eaeebcdf32f91f980d360f78732ddc061097ab4e39991ae7a6ace9194677"
dependencies = [
"bytemuck_derive",
]
[[package]]
name = "bytemuck_derive"
-version = "1.10.0"
+version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4"
+checksum = "4f154e572231cb6ba2bd1176980827e3d5dc04cc183a75dea38109fbdd672d29"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -1625,7 +1658,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a"
dependencies = [
"camino",
"cargo-platform",
- "semver 1.0.26",
+ "semver 1.0.27",
"serde",
"serde_json",
"thiserror 1.0.69",
@@ -1680,9 +1713,9 @@ checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "cfg-if"
-version = "1.0.1"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
+checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
[[package]]
name = "cfg_aliases"
@@ -1698,16 +1731,15 @@ checksum = "45565fc9416b9896014f5732ac776f810ee53a66730c17e4020c3ec064a8f88f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "chrono"
-version = "0.4.41"
+version = "0.4.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
+checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2"
dependencies = [
- "android-tzdata",
"iana-time-zone",
"js-sys",
"num-traits",
@@ -1848,7 +1880,7 @@ dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -1925,36 +1957,22 @@ dependencies = [
[[package]]
name = "console"
-version = "0.16.0"
+version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d"
+checksum = "b430743a6eb14e9764d4260d4c0d8123087d504eeb9c48f2b2a5e810dd369df4"
dependencies = [
"encode_unicode",
"libc",
"once_cell",
"unicode-width 0.2.0",
- "windows-sys 0.60.2",
-]
-
-[[package]]
-name = "console_error_panic_hook"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc"
-dependencies = [
- "cfg-if 1.0.1",
- "wasm-bindgen",
+ "windows-sys 0.61.0",
]
[[package]]
-name = "console_log"
-version = "0.2.2"
+name = "const-oid"
+version = "0.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e89f72f65e8501878b8a004d5a1afb780987e2ce2b4532c562e367a72c57499f"
-dependencies = [
- "log",
- "web-sys",
-]
+checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
[[package]]
name = "const_format"
@@ -2059,7 +2077,7 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
]
[[package]]
@@ -2126,7 +2144,7 @@ version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"crossbeam-epoch",
"crossbeam-utils",
]
@@ -2136,7 +2154,7 @@ name = "crossbeam-epoch"
version = "0.9.5"
source = "git+https://github.com/anza-xyz/crossbeam?rev=fd279d707025f0e60951e429bf778b4813d1b6bf#fd279d707025f0e60951e429bf778b4813d1b6bf"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"crossbeam-utils",
"lazy_static",
"memoffset 0.6.4",
@@ -2149,7 +2167,7 @@ version = "0.8.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
]
[[package]]
@@ -2158,6 +2176,18 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+[[package]]
+name = "crypto-bigint"
+version = "0.5.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76"
+dependencies = [
+ "generic-array 0.14.7",
+ "rand_core 0.6.4",
+ "subtle",
+ "zeroize",
+]
+
[[package]]
name = "crypto-common"
version = "0.1.6"
@@ -2211,12 +2241,13 @@ dependencies = [
[[package]]
name = "ctrlc"
-version = "3.4.7"
+version = "3.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46f93780a459b7d656ef7f071fe699c4d3d2cb201c4b24d085b6ddc505276e73"
+checksum = "881c5d0a13b2f1498e2306e82cbada78390e152d4b1378fb28a84f4dcd0dc4f3"
dependencies = [
+ "dispatch",
"nix",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.0",
]
[[package]]
@@ -2238,7 +2269,7 @@ version = "4.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"cpufeatures",
"curve25519-dalek-derive",
"digest 0.10.7",
@@ -2258,14 +2289,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "darling"
-version = "0.20.1"
+version = "0.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944"
+checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0"
dependencies = [
"darling_core",
"darling_macro",
@@ -2273,27 +2304,27 @@ dependencies = [
[[package]]
name = "darling_core"
-version = "0.20.1"
+version = "0.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb"
+checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
- "strsim 0.10.0",
- "syn 2.0.104",
+ "strsim 0.11.1",
+ "syn 2.0.106",
]
[[package]]
name = "darling_macro"
-version = "0.20.1"
+version = "0.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a"
+checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81"
dependencies = [
"darling_core",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -2302,7 +2333,7 @@ version = "5.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"hashbrown 0.14.3",
"lock_api",
"once_cell",
@@ -2317,6 +2348,16 @@ version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57"
+[[package]]
+name = "der"
+version = "0.7.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb"
+dependencies = [
+ "const-oid",
+ "zeroize",
+]
+
[[package]]
name = "der-parser"
version = "8.1.0"
@@ -2361,13 +2402,13 @@ dependencies = [
[[package]]
name = "derive-where"
-version = "1.5.0"
+version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "510c292c8cf384b1a340b816a9a6cf2599eb8f566a44949024af88418000c50b"
+checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -2378,7 +2419,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -2412,7 +2453,7 @@ dependencies = [
"convert_case 0.6.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"unicode-xid",
]
@@ -2465,6 +2506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer 0.10.4",
+ "const-oid",
"crypto-common",
"subtle",
]
@@ -2484,7 +2526,7 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"dirs-sys-next",
]
@@ -2499,6 +2541,12 @@ dependencies = [
"winapi 0.3.9",
]
+[[package]]
+name = "dispatch"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"
+
[[package]]
name = "displaydoc"
version = "0.2.3"
@@ -2530,7 +2578,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -2557,13 +2605,37 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abe71d579d1812060163dff96056261deb5bf6729b100fa2e36a68b9649ba3d3"
+[[package]]
+name = "ecdsa"
+version = "0.16.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca"
+dependencies = [
+ "der",
+ "digest 0.10.7",
+ "elliptic-curve",
+ "rfc6979",
+ "signature 2.2.0",
+ "spki",
+]
+
[[package]]
name = "ed25519"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc"
dependencies = [
- "signature",
+ "signature 1.4.0",
+]
+
+[[package]]
+name = "ed25519"
+version = "2.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53"
+dependencies = [
+ "pkcs8",
+ "signature 2.2.0",
]
[[package]]
@@ -2573,21 +2645,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d"
dependencies = [
"curve25519-dalek 3.2.0",
- "ed25519",
+ "ed25519 1.2.0",
"rand 0.7.3",
"serde",
"sha2 0.9.9",
"zeroize",
]
+[[package]]
+name = "ed25519-dalek"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9"
+dependencies = [
+ "curve25519-dalek 4.1.3",
+ "ed25519 2.2.3",
+ "rand_core 0.6.4",
+ "serde",
+ "sha2 0.10.9",
+ "subtle",
+ "zeroize",
+]
+
[[package]]
name = "ed25519-dalek-bip32"
-version = "0.2.0"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d2be62a4061b872c8c0873ee4fc6f101ce7b889d039f019c5fa2af471a59908"
+checksum = "6b49a684b133c4980d7ee783936af771516011c8cd15f429dbda77245e282f03"
dependencies = [
"derivation-path",
- "ed25519-dalek",
+ "ed25519-dalek 2.2.0",
"hmac 0.12.1",
"sha2 0.10.9",
]
@@ -2610,6 +2697,25 @@ version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2"
+[[package]]
+name = "elliptic-curve"
+version = "0.13.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47"
+dependencies = [
+ "base16ct",
+ "crypto-bigint",
+ "digest 0.10.7",
+ "ff",
+ "generic-array 0.14.7",
+ "group",
+ "pkcs8",
+ "rand_core 0.6.4",
+ "sec1",
+ "subtle",
+ "zeroize",
+]
+
[[package]]
name = "encode_unicode"
version = "1.0.0"
@@ -2622,7 +2728,7 @@ version = "0.8.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a74ea89a0a1b98f6332de42c95baff457ada66d1cb4030f9ff151b2041a1c746"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
]
[[package]]
@@ -2642,7 +2748,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -2668,19 +2774,6 @@ dependencies = [
"regex",
]
-[[package]]
-name = "env_logger"
-version = "0.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
-dependencies = [
- "atty",
- "humantime",
- "log",
- "regex",
- "termcolor",
-]
-
[[package]]
name = "env_logger"
version = "0.11.8"
@@ -2710,28 +2803,6 @@ dependencies = [
"windows-sys 0.59.0",
]
-[[package]]
-name = "etcd-client"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4b0ea5ef6dc2388a4b1669fa32097249bc03a15417b97cb75e38afb309e4a89"
-dependencies = [
- "http 0.2.12",
- "prost",
- "tokio",
- "tokio-stream",
- "tonic",
- "tonic-build",
- "tower 0.4.13",
- "tower-service",
-]
-
-[[package]]
-name = "event-listener"
-version = "2.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71"
-
[[package]]
name = "event-listener"
version = "5.3.1"
@@ -2749,7 +2820,7 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1"
dependencies = [
- "event-listener 5.3.1",
+ "event-listener",
"pin-project-lite",
]
@@ -2770,14 +2841,14 @@ dependencies = [
[[package]]
name = "fastbloom"
-version = "0.9.0"
+version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "27cea6e7f512d43b098939ff4d5a5d6fe3db07971e1d05176fe26c642d33f5b8"
+checksum = "18c1ddb9231d8554c2d6bdf4cfaabf0c59251658c68b6c95cd52dd0c513a912a"
dependencies = [
"getrandom 0.3.3",
+ "libm",
"rand 0.9.0",
"siphasher 1.0.1",
- "wide",
]
[[package]]
@@ -2792,7 +2863,7 @@ version = "3.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef033ed5e9bad94e55838ca0ca906db0e043f517adda0c8b79c7a8c66c93c1b5"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"rustix 0.38.39",
"windows-sys 0.48.0",
]
@@ -2803,6 +2874,17 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "835a3dc7d1ec9e75e2b5fb4ba75396837112d2060b03f7d43bc1897c7f7211da"
+[[package]]
+name = "ff"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393"
+dependencies = [
+ "bitvec",
+ "rand_core 0.6.4",
+ "subtle",
+]
+
[[package]]
name = "fiat-crypto"
version = "0.2.9"
@@ -2826,7 +2908,7 @@ version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "975ccf83d8d9d0d84682850a38c8169027be83368805971cc4f238c2b245bc98"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"libc",
"redox_syscall 0.2.10",
"winapi 0.3.9",
@@ -2910,11 +2992,11 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
-version = "1.2.1"
+version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
+checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf"
dependencies = [
- "percent-encoding 2.3.1",
+ "percent-encoding 2.3.2",
]
[[package]]
@@ -3004,7 +3086,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -3061,7 +3143,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32c95766e0414f8bfc1d07055574c621b67739466d6ba516c4fef8e99d30d2e6"
dependencies = [
"bitflags 1.3.2",
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"log",
"managed",
"num-traits",
@@ -3070,7 +3152,7 @@ dependencies = [
[[package]]
name = "gen-headers"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"log",
"regex",
@@ -3078,7 +3160,7 @@ dependencies = [
[[package]]
name = "gen-syscall-list"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"regex",
]
@@ -3090,7 +3172,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "186014d53bc231d0090ef8d6f03e0920c54d85a5ed22f4f2f74315ec56cf83fb"
dependencies = [
"cc",
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"libc",
"log",
"rustversion",
@@ -3114,6 +3196,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
"typenum",
"version_check",
+ "zeroize",
]
[[package]]
@@ -3132,7 +3215,7 @@ version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"js-sys",
"libc",
"wasi 0.9.0+wasi-snapshot-preview1",
@@ -3145,7 +3228,7 @@ version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"js-sys",
"libc",
"wasi 0.11.0+wasi-snapshot-preview1",
@@ -3158,7 +3241,7 @@ version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"js-sys",
"libc",
"r-efi",
@@ -3216,7 +3299,7 @@ version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68a7f542ee6b35af73b06abc0dad1c1bae89964e4e253bc4b587b91c9637867b"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"dashmap",
"futures 0.3.31",
"futures-timer",
@@ -3230,6 +3313,19 @@ dependencies = [
"spinning_top",
]
+[[package]]
+name = "group"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63"
+dependencies = [
+ "ff",
+ "rand 0.8.5",
+ "rand_core 0.6.4",
+ "rand_xorshift 0.3.0",
+ "subtle",
+]
+
[[package]]
name = "h2"
version = "0.3.26"
@@ -3242,7 +3338,7 @@ dependencies = [
"futures-sink",
"futures-util",
"http 0.2.12",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"slab",
"tokio",
"tokio-util 0.7.16",
@@ -3374,7 +3470,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03b876ecf37e86b359573c16c8366bc3eba52b689884a0fc42ba3f67203d2a8b"
dependencies = [
"cc",
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"libc",
"pkg-config",
"windows-sys 0.48.0",
@@ -3486,9 +3582,9 @@ checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440"
[[package]]
name = "humantime"
-version = "2.2.0"
+version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f"
+checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424"
[[package]]
name = "hxdmp"
@@ -3582,7 +3678,8 @@ dependencies = [
"http 1.1.0",
"hyper 1.6.0",
"hyper-util",
- "rustls 0.23.31",
+ "rustls 0.23.32",
+ "rustls-native-certs",
"rustls-pki-types",
"tokio",
"tokio-rustls 0.26.2",
@@ -3631,7 +3728,7 @@ dependencies = [
"hyper 1.6.0",
"ipnet",
"libc",
- "percent-encoding 2.3.1",
+ "percent-encoding 2.3.2",
"pin-project-lite",
"socket2 0.5.10",
"tokio",
@@ -3767,7 +3864,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -3789,9 +3886,9 @@ dependencies = [
[[package]]
name = "idna"
-version = "1.0.3"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
+checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de"
dependencies = [
"idna_adapter",
"smallvec",
@@ -3861,9 +3958,9 @@ dependencies = [
[[package]]
name = "indexmap"
-version = "2.10.0"
+version = "2.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661"
+checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5"
dependencies = [
"equivalent",
"hashbrown 0.15.1",
@@ -3876,7 +3973,7 @@ version = "0.17.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4adb2ee6ad319a912210a36e56e3623555817bcc877a7e6e8802d1d69c4d8056"
dependencies = [
- "console 0.16.0",
+ "console 0.16.1",
"portable-atomic",
"unicode-width 0.2.0",
"unit-prefix",
@@ -3889,7 +3986,7 @@ version = "0.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70a646d946d06bedbbc4cac4c218acf4bbf2d87757a784857025f4d447e4e1cd"
dependencies = [
- "console 0.16.0",
+ "console 0.16.1",
"portable-atomic",
"unicode-width 0.2.0",
"unit-prefix",
@@ -3911,17 +4008,17 @@ version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
]
[[package]]
name = "io-uring"
-version = "0.7.9"
+version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
+checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b"
dependencies = [
- "bitflags 2.9.1",
- "cfg-if 1.0.1",
+ "bitflags 2.9.4",
+ "cfg-if 1.0.3",
"libc",
]
@@ -4003,7 +4100,7 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4013,7 +4110,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97"
dependencies = [
"cesu8",
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"combine 4.6.7",
"jni-sys",
"log",
@@ -4039,9 +4136,9 @@ dependencies = [
[[package]]
name = "js-sys"
-version = "0.3.77"
+version = "0.3.80"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
+checksum = "852f13bec5eba4ba9afbeb93fd7c13fe56147f055939ae21c43a29a0ecb2702e"
dependencies = [
"once_cell",
"wasm-bindgen",
@@ -4179,13 +4276,17 @@ dependencies = [
]
[[package]]
-name = "kaigan"
-version = "0.2.6"
+name = "k256"
+version = "0.13.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ba15de5aeb137f0f65aa3bf82187647f1285abfe5b20c80c2c37f7007ad519a"
+checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b"
dependencies = [
- "borsh 0.10.3",
- "serde",
+ "cfg-if 1.0.3",
+ "ecdsa",
+ "elliptic-curve",
+ "once_cell",
+ "sha2 0.10.9",
+ "signature 2.2.0",
]
[[package]]
@@ -4230,9 +4331,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "libc"
-version = "0.2.174"
+version = "0.2.175"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
+checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
[[package]]
name = "libloading"
@@ -4240,7 +4341,7 @@ version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"winapi 0.3.9",
]
@@ -4371,9 +4472,9 @@ dependencies = [
[[package]]
name = "log"
-version = "0.4.27"
+version = "0.4.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
+checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"
[[package]]
name = "lru"
@@ -4450,9 +4551,9 @@ dependencies = [
[[package]]
name = "memmap2"
-version = "0.9.7"
+version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "483758ad303d734cec05e5c12b41d7e93e6a6390c5e9dae6bdeb7c1259012d28"
+checksum = "843a98750cd611cc2965a8213b53b43e715f13c37a9e096c6408e69990961db7"
dependencies = [
"libc",
]
@@ -4531,7 +4632,7 @@ version = "0.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"downcast",
"fragile",
"lazy_static",
@@ -4546,7 +4647,7 @@ version = "0.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"proc-macro2",
"quote",
"syn 1.0.109",
@@ -4614,8 +4715,8 @@ version = "0.30.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6"
dependencies = [
- "bitflags 2.9.1",
- "cfg-if 1.0.1",
+ "bitflags 2.9.4",
+ "cfg-if 1.0.3",
"cfg_aliases",
"libc",
"memoffset 0.9.1",
@@ -4703,7 +4804,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4776,7 +4877,7 @@ dependencies = [
"proc-macro-crate 3.1.0",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4805,7 +4906,7 @@ checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
dependencies = [
"crc32fast",
"hashbrown 0.15.1",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"memchr",
]
@@ -4848,8 +4949,8 @@ version = "0.10.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8"
dependencies = [
- "bitflags 2.9.1",
- "cfg-if 1.0.1",
+ "bitflags 2.9.4",
+ "cfg-if 1.0.3",
"foreign-types",
"libc",
"once_cell",
@@ -4865,7 +4966,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -4909,7 +5010,7 @@ dependencies = [
"futures-util",
"js-sys",
"lazy_static",
- "percent-encoding 2.3.1",
+ "percent-encoding 2.3.2",
"pin-project",
"rand 0.8.5",
"thiserror 1.0.69",
@@ -4927,6 +5028,15 @@ version = "3.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f"
+[[package]]
+name = "pairing"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f"
+dependencies = [
+ "group",
+]
+
[[package]]
name = "parity-tokio-ipc"
version = "0.9.0"
@@ -4974,7 +5084,7 @@ version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"instant",
"libc",
"redox_syscall 0.2.10",
@@ -4988,7 +5098,7 @@ version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"libc",
"redox_syscall 0.3.5",
"smallvec",
@@ -5047,9 +5157,9 @@ checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831"
[[package]]
name = "percent-encoding"
-version = "2.3.1"
+version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
+checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "percentage"
@@ -5155,6 +5265,16 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+[[package]]
+name = "pkcs8"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
+dependencies = [
+ "der",
+ "spki",
+]
+
[[package]]
name = "pkg-config"
version = "0.3.22"
@@ -5195,7 +5315,7 @@ version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"cpufeatures",
"opaque-debug 0.3.0",
"universal-hash",
@@ -5361,18 +5481,18 @@ dependencies = [
[[package]]
name = "proptest"
-version = "1.7.0"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f"
+checksum = "2bb0be07becd10686a0bb407298fb425360a5c44a663774406340c59a22de4ce"
dependencies = [
"bit-set",
"bit-vec",
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"lazy_static",
"num-traits",
"rand 0.9.0",
"rand_chacha 0.9.0",
- "rand_xorshift",
+ "rand_xorshift 0.4.0",
"regex-syntax",
"rusty-fork",
"tempfile",
@@ -5435,7 +5555,7 @@ dependencies = [
[[package]]
name = "proto"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"protobuf-src",
"tonic-build",
@@ -5456,7 +5576,7 @@ version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d464fae65fff2680baf48019211ce37aaec0c78e9264c84a3e484717f965104e"
dependencies = [
- "percent-encoding 2.3.1",
+ "percent-encoding 2.3.2",
]
[[package]]
@@ -5467,7 +5587,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -5493,9 +5613,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
[[package]]
name = "quinn"
-version = "0.11.8"
+version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8"
+checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20"
dependencies = [
"bytes",
"cfg_aliases",
@@ -5503,9 +5623,9 @@ dependencies = [
"quinn-proto",
"quinn-udp",
"rustc-hash 2.0.0",
- "rustls 0.23.31",
- "socket2 0.5.10",
- "thiserror 2.0.12",
+ "rustls 0.23.32",
+ "socket2 0.6.0",
+ "thiserror 2.0.16",
"tokio",
"tracing",
"web-time",
@@ -5513,9 +5633,9 @@ dependencies = [
[[package]]
name = "quinn-proto"
-version = "0.11.12"
+version = "0.11.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e"
+checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31"
dependencies = [
"bytes",
"fastbloom",
@@ -5524,11 +5644,11 @@ dependencies = [
"rand 0.9.0",
"ring",
"rustc-hash 2.0.0",
- "rustls 0.23.31",
+ "rustls 0.23.32",
"rustls-pki-types",
"rustls-platform-verifier",
"slab",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tinyvec",
"tracing",
"web-time",
@@ -5706,6 +5826,15 @@ dependencies = [
"rand_core 0.6.4",
]
+[[package]]
+name = "rand_xorshift"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f"
+dependencies = [
+ "rand_core 0.6.4",
+]
+
[[package]]
name = "rand_xorshift"
version = "0.4.0"
@@ -5730,14 +5859,14 @@ version = "11.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d"
dependencies = [
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
]
[[package]]
name = "rayon"
-version = "1.10.0"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
+checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f"
dependencies = [
"either",
"rayon-core",
@@ -5745,9 +5874,9 @@ dependencies = [
[[package]]
name = "rayon-core"
-version = "1.12.1"
+version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
+checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
@@ -5755,7 +5884,7 @@ dependencies = [
[[package]]
name = "rbpf-cli"
-version = "3.0.0"
+version = "3.1.0"
[[package]]
name = "rdrand"
@@ -5811,9 +5940,9 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.11.1"
+version = "1.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
+checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
dependencies = [
"aho-corasick 1.0.1",
"memchr",
@@ -5868,7 +5997,7 @@ dependencies = [
"mime",
"native-tls",
"once_cell",
- "percent-encoding 2.3.1",
+ "percent-encoding 2.3.2",
"pin-project-lite",
"rustls 0.21.12",
"rustls-pemfile",
@@ -5882,7 +6011,7 @@ dependencies = [
"tokio-rustls 0.24.1",
"tokio-util 0.7.16",
"tower-service",
- "url 2.5.4",
+ "url 2.5.7",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
@@ -5892,9 +6021,9 @@ dependencies = [
[[package]]
name = "reqwest"
-version = "0.12.22"
+version = "0.12.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531"
+checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb"
dependencies = [
"async-compression",
"base64 0.22.1",
@@ -5910,10 +6039,11 @@ dependencies = [
"hyper-util",
"js-sys",
"log",
- "percent-encoding 2.3.1",
+ "percent-encoding 2.3.2",
"pin-project-lite",
"quinn",
- "rustls 0.23.31",
+ "rustls 0.23.32",
+ "rustls-native-certs",
"rustls-pki-types",
"serde",
"serde_json",
@@ -5925,7 +6055,7 @@ dependencies = [
"tower 0.5.2",
"tower-http",
"tower-service",
- "url 2.5.4",
+ "url 2.5.7",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
@@ -5941,12 +6071,22 @@ dependencies = [
"anyhow",
"async-trait",
"http 1.1.0",
- "reqwest 0.12.22",
+ "reqwest 0.12.23",
"serde",
"thiserror 1.0.69",
"tower-service",
]
+[[package]]
+name = "rfc6979"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2"
+dependencies = [
+ "hmac 0.12.1",
+ "subtle",
+]
+
[[package]]
name = "ring"
version = "0.17.14"
@@ -5954,7 +6094,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7"
dependencies = [
"cc",
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"getrandom 0.2.15",
"libc",
"untrusted",
@@ -6034,7 +6174,7 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
dependencies = [
- "semver 1.0.26",
+ "semver 1.0.27",
]
[[package]]
@@ -6052,7 +6192,7 @@ version = "0.38.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee"
dependencies = [
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"errno",
"libc",
"linux-raw-sys 0.4.14",
@@ -6065,7 +6205,7 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825"
dependencies = [
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"errno",
"libc",
"linux-raw-sys 0.9.2",
@@ -6086,14 +6226,14 @@ dependencies = [
[[package]]
name = "rustls"
-version = "0.23.31"
+version = "0.23.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc"
+checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40"
dependencies = [
"once_cell",
"ring",
"rustls-pki-types",
- "rustls-webpki 0.103.4",
+ "rustls-webpki 0.103.6",
"subtle",
"zeroize",
]
@@ -6131,19 +6271,19 @@ dependencies = [
[[package]]
name = "rustls-platform-verifier"
-version = "0.5.1"
+version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a5467026f437b4cb2a533865eaa73eb840019a0916f4b9ec563c6e617e086c9"
+checksum = "be59af91596cac372a6942530653ad0c3a246cdd491aaa9dcaee47f88d67d5a0"
dependencies = [
"core-foundation 0.10.0",
"core-foundation-sys",
"jni",
"log",
"once_cell",
- "rustls 0.23.31",
+ "rustls 0.23.32",
"rustls-native-certs",
"rustls-platform-verifier-android",
- "rustls-webpki 0.103.4",
+ "rustls-webpki 0.103.6",
"security-framework 3.2.0",
"security-framework-sys",
"webpki-root-certs",
@@ -6168,9 +6308,9 @@ dependencies = [
[[package]]
name = "rustls-webpki"
-version = "0.103.4"
+version = "0.103.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc"
+checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb"
dependencies = [
"ring",
"rustls-pki-types",
@@ -6201,15 +6341,6 @@ version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
-[[package]]
-name = "safe_arch"
-version = "0.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323"
-dependencies = [
- "bytemuck",
-]
-
[[package]]
name = "same-file"
version = "1.0.6"
@@ -6251,13 +6382,27 @@ dependencies = [
"untrusted",
]
+[[package]]
+name = "sec1"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc"
+dependencies = [
+ "base16ct",
+ "der",
+ "generic-array 0.14.7",
+ "pkcs8",
+ "subtle",
+ "zeroize",
+]
+
[[package]]
name = "security-framework"
version = "2.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
dependencies = [
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"core-foundation 0.9.4",
"core-foundation-sys",
"libc",
@@ -6270,7 +6415,7 @@ version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316"
dependencies = [
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"core-foundation 0.10.0",
"core-foundation-sys",
"libc",
@@ -6298,11 +6443,12 @@ dependencies = [
[[package]]
name = "semver"
-version = "1.0.26"
+version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0"
+checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
dependencies = [
"serde",
+ "serde_core",
]
[[package]]
@@ -6325,10 +6471,11 @@ dependencies = [
[[package]]
name = "serde"
-version = "1.0.219"
+version = "1.0.226"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
+checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd"
dependencies = [
+ "serde_core",
"serde_derive",
]
@@ -6343,34 +6490,45 @@ dependencies = [
[[package]]
name = "serde_bytes"
-version = "0.11.17"
+version = "0.11.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96"
+checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8"
dependencies = [
"serde",
+ "serde_core",
+]
+
+[[package]]
+name = "serde_core"
+version = "1.0.226"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4"
+dependencies = [
+ "serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.219"
+version = "1.0.226"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
+checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "serde_json"
-version = "1.0.142"
+version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
+checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
+ "serde_core",
]
[[package]]
@@ -6406,9 +6564,9 @@ dependencies = [
[[package]]
name = "serde_with"
-version = "3.14.0"
+version = "3.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5"
+checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e"
dependencies = [
"serde",
"serde_derive",
@@ -6417,14 +6575,14 @@ dependencies = [
[[package]]
name = "serde_with_macros"
-version = "3.14.0"
+version = "3.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f"
+checksum = "327ada00f7d64abaac1e55a6911e90cf665aa051b9a561c7006c157f4633135e"
dependencies = [
"darling",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -6445,7 +6603,7 @@ version = "0.9.34+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
dependencies = [
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itoa",
"ryu",
"serde",
@@ -6474,7 +6632,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -6496,7 +6654,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6"
dependencies = [
"block-buffer 0.9.0",
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"cpufeatures",
"digest 0.9.0",
"opaque-debug 0.3.0",
@@ -6508,7 +6666,7 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"cpufeatures",
"digest 0.10.7",
]
@@ -6519,7 +6677,7 @@ version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"cpufeatures",
"digest 0.10.7",
]
@@ -6531,7 +6689,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800"
dependencies = [
"block-buffer 0.9.0",
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"cpufeatures",
"digest 0.9.0",
"opaque-debug 0.3.0",
@@ -6543,7 +6701,7 @@ version = "0.10.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"cpufeatures",
"digest 0.10.7",
]
@@ -6623,6 +6781,16 @@ version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788"
+[[package]]
+name = "signature"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de"
+dependencies = [
+ "digest 0.10.7",
+ "rand_core 0.6.4",
+]
+
[[package]]
name = "simpl"
version = "0.1.0"
@@ -6653,9 +6821,9 @@ dependencies = [
[[package]]
name = "slab"
-version = "0.4.10"
+version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d"
+checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
[[package]]
name = "smallvec"
@@ -6716,9 +6884,9 @@ dependencies = [
[[package]]
name = "solana-account"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f949fe4edaeaea78c844023bfc1c898e0b1f5a100f8a8d2d0f85d0a7b090258"
+checksum = "f885ce7f937871ecb56aadbeaaec963b234a580b7d6ebbdb8fa4249a36f92433"
dependencies = [
"bincode",
"qualifier_attr",
@@ -6729,7 +6897,7 @@ dependencies = [
"solana-clock",
"solana-frozen-abi",
"solana-frozen-abi-macro",
- "solana-instruction",
+ "solana-instruction-error",
"solana-logger",
"solana-pubkey",
"solana-sdk-ids",
@@ -6738,7 +6906,7 @@ dependencies = [
[[package]]
name = "solana-account-decoder"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"Inflector",
"assert_matches",
@@ -6753,7 +6921,7 @@ dependencies = [
"solana-account-decoder-client-types",
"solana-address-lookup-table-interface",
"solana-clock",
- "solana-config-program-client",
+ "solana-config-interface",
"solana-epoch-schedule",
"solana-fee-calculator",
"solana-hash",
@@ -6776,13 +6944,13 @@ dependencies = [
"spl-token-group-interface",
"spl-token-interface",
"spl-token-metadata-interface",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"zstd",
]
[[package]]
name = "solana-account-decoder-client-types"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"base64 0.22.1",
"bs58",
@@ -6796,9 +6964,9 @@ dependencies = [
[[package]]
name = "solana-account-info"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8f5152a288ef1912300fc6efa6c2d1f9bb55d9398eb6c72326360b8063987da"
+checksum = "82f4691b69b172c687d218dd2f1f23fc7ea5e9aa79df9ac26dab3d8dd829ce48"
dependencies = [
"bincode",
"serde",
@@ -6809,7 +6977,7 @@ dependencies = [
[[package]]
name = "solana-accounts-cluster-bench"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"clap 2.33.3",
"log",
@@ -6855,7 +7023,7 @@ dependencies = [
[[package]]
name = "solana-accounts-db"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-io-uring",
"agave-reserved-account-keys",
@@ -6870,14 +7038,14 @@ dependencies = [
"criterion",
"crossbeam-channel",
"dashmap",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"io-uring",
"itertools 0.12.1",
"libc",
"libsecp256k1",
"log",
"lz4",
- "memmap2 0.9.7",
+ "memmap2 0.9.8",
"memoffset 0.9.1",
"modular-bitfield",
"num_cpus",
@@ -6915,7 +7083,6 @@ dependencies = [
"solana-pubkey",
"solana-rayon-threadlimit",
"solana-rent",
- "solana-rent-collector",
"solana-reward-info",
"solana-sdk-ids",
"solana-sha256-hasher",
@@ -6924,6 +7091,7 @@ dependencies = [
"solana-slot-hashes",
"solana-slot-history",
"solana-stake-program",
+ "solana-svm",
"solana-svm-transaction",
"solana-system-interface",
"solana-sysvar",
@@ -6939,15 +7107,40 @@ dependencies = [
"tar",
"tempfile",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tikv-jemallocator",
]
+[[package]]
+name = "solana-address"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0a7a457086457ea9db9a5199d719dc8734dc2d0342fad0d8f77633c31eb62f19"
+dependencies = [
+ "arbitrary",
+ "borsh",
+ "bytemuck",
+ "bytemuck_derive",
+ "curve25519-dalek 4.1.3",
+ "five8",
+ "five8_const",
+ "rand 0.8.5",
+ "serde",
+ "serde_derive",
+ "solana-atomic-u64",
+ "solana-define-syscall 3.0.0",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
+ "solana-program-error",
+ "solana-sanitize",
+ "solana-sha256-hasher",
+]
+
[[package]]
name = "solana-address-lookup-table-interface"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1673f67efe870b64a65cb39e6194be5b26527691ce5922909939961a6e6b395"
+checksum = "e2f56cac5e70517a2f27d05e5100b20de7182473ffd0035b23ea273307905987"
dependencies = [
"bincode",
"bytemuck",
@@ -6955,6 +7148,7 @@ dependencies = [
"serde_derive",
"solana-clock",
"solana-instruction",
+ "solana-instruction-error",
"solana-pubkey",
"solana-sdk-ids",
"solana-slot-hashes",
@@ -6962,16 +7156,16 @@ dependencies = [
[[package]]
name = "solana-atomic-u64"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d52e52720efe60465b052b9e7445a01c17550666beec855cce66f44766697bc2"
+checksum = "a933ff1e50aff72d02173cfcd7511bd8540b027ee720b75f353f594f834216d0"
dependencies = [
"parking_lot 0.12.3",
]
[[package]]
name = "solana-banking-bench"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-banking-stage-ingress-types",
"assert_matches",
@@ -7007,9 +7201,9 @@ dependencies = [
[[package]]
name = "solana-banks-client"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
- "borsh 1.5.7",
+ "borsh",
"futures 0.3.31",
"solana-account",
"solana-banks-interface",
@@ -7030,14 +7224,14 @@ dependencies = [
"solana-transaction-context",
"solana-transaction-error",
"tarpc",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
"tokio-serde",
]
[[package]]
name = "solana-banks-interface"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"serde",
"serde_derive",
@@ -7056,7 +7250,7 @@ dependencies = [
[[package]]
name = "solana-banks-server"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"bincode",
@@ -7084,7 +7278,7 @@ dependencies = [
[[package]]
name = "solana-bench-streamer"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"clap 3.2.23",
"crossbeam-channel",
@@ -7096,7 +7290,7 @@ dependencies = [
[[package]]
name = "solana-bench-tps"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"chrono",
@@ -7157,13 +7351,13 @@ dependencies = [
"solana-version",
"spl-instruction-padding-interface",
"tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tikv-jemallocator",
]
[[package]]
name = "solana-bench-vote"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bincode",
"clap 2.33.3",
@@ -7183,45 +7377,45 @@ dependencies = [
"solana-version",
"solana-vote-program",
"tikv-jemallocator",
+ "tokio-util 0.7.16",
]
[[package]]
name = "solana-big-mod-exp"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75db7f2bbac3e62cfd139065d15bcda9e2428883ba61fc8d27ccb251081e7567"
+checksum = "30c80fb6d791b3925d5ec4bf23a7c169ef5090c013059ec3ed7d0b2c04efa085"
dependencies = [
"num-bigint 0.4.6",
"num-traits",
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
]
[[package]]
name = "solana-bincode"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19a3787b8cf9c9fe3dd360800e8b70982b9e5a8af9e11c354b6665dd4a003adc"
+checksum = "534a37aecd21986089224d0c01006a75b96ac6fb2f418c24edc15baf0d2a4c99"
dependencies = [
"bincode",
"serde",
- "solana-instruction",
+ "solana-instruction-error",
]
[[package]]
name = "solana-blake3-hasher"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1a0801e25a1b31a14494fc80882a036be0ffd290efc4c2d640bfcca120a4672"
+checksum = "ffa2e3bdac3339c6d0423275e45dafc5ac25f4d43bf344d026a3cc9a85e244a6"
dependencies = [
"blake3",
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
"solana-hash",
- "solana-sanitize",
]
[[package]]
name = "solana-bloom"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bencher",
"bv",
@@ -7239,34 +7433,59 @@ dependencies = [
"solana-time-utils",
]
+[[package]]
+name = "solana-bls-signatures"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a40ce56d14f58c3ebe9275c3739c4052748ec5c4922854c12dc823dbf450ebd1"
+dependencies = [
+ "base64 0.22.1",
+ "blst",
+ "blstrs",
+ "bytemuck",
+ "cfg_eval",
+ "ff",
+ "group",
+ "pairing",
+ "rand 0.8.5",
+ "serde",
+ "serde_json",
+ "serde_with",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
+ "solana-signature",
+ "solana-signer",
+ "subtle",
+ "thiserror 2.0.16",
+]
+
[[package]]
name = "solana-bn254"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4420f125118732833f36facf96a27e7b78314b2d642ba07fa9ffdacd8d79e243"
+checksum = "20a5f01e99addb316d95d4ed31aa6eacfda557fffc00ae316b919e8ba0fc5b91"
dependencies = [
"ark-bn254",
"ark-ec",
"ark-ff",
"ark-serialize",
"bytemuck",
- "solana-define-syscall",
- "thiserror 2.0.12",
+ "solana-define-syscall 3.0.0",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-borsh"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "718333bcd0a1a7aed6655aa66bef8d7fb047944922b2d3a18f49cbc13e73d004"
+checksum = "dc402b16657abbfa9991cd5cbfac5a11d809f7e7d28d3bb291baeb088b39060e"
dependencies = [
- "borsh 0.10.3",
- "borsh 1.5.7",
+ "borsh",
]
[[package]]
name = "solana-bpf-loader-program"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-syscalls",
"assert_matches",
@@ -7285,8 +7504,6 @@ dependencies = [
"solana-last-restart-slot",
"solana-loader-v3-interface",
"solana-loader-v4-interface",
- "solana-log-collector",
- "solana-measure",
"solana-packet",
"solana-program",
"solana-program-entrypoint",
@@ -7298,16 +7515,18 @@ dependencies = [
"solana-slot-hashes",
"solana-svm-callback",
"solana-svm-feature-set",
+ "solana-svm-log-collector",
+ "solana-svm-measure",
+ "solana-svm-type-overrides",
"solana-system-interface",
"solana-transaction-context",
- "solana-type-overrides",
"static_assertions",
"test-case",
]
[[package]]
name = "solana-bpf-loader-program-tests"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"bincode",
@@ -7327,13 +7546,13 @@ dependencies = [
[[package]]
name = "solana-bucket-map"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bv",
"bytemuck",
"bytemuck_derive",
"fs_extra",
- "memmap2 0.9.7",
+ "memmap2 0.9.8",
"modular-bitfield",
"num_enum",
"rand 0.8.5",
@@ -7348,7 +7567,7 @@ dependencies = [
[[package]]
name = "solana-builtins"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"solana-bpf-loader-program",
@@ -7367,11 +7586,12 @@ dependencies = [
[[package]]
name = "solana-builtins-default-costs"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"ahash 0.8.11",
"log",
+ "qualifier_attr",
"rand 0.8.5",
"solana-bpf-loader-program",
"solana-compute-budget-program",
@@ -7387,7 +7607,7 @@ dependencies = [
[[package]]
name = "solana-cargo-build-sbf"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_cmd",
"bzip2",
@@ -7397,8 +7617,8 @@ dependencies = [
"log",
"predicates",
"regex",
- "reqwest 0.12.22",
- "semver 1.0.26",
+ "reqwest 0.12.23",
+ "semver 1.0.27",
"serial_test",
"solana-file-download",
"solana-keypair",
@@ -7408,7 +7628,7 @@ dependencies = [
[[package]]
name = "solana-cargo-test-sbf"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"cargo_metadata",
"clap 3.2.23",
@@ -7420,7 +7640,7 @@ dependencies = [
[[package]]
name = "solana-clap-utils"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"chrono",
@@ -7442,15 +7662,15 @@ dependencies = [
"solana-signer",
"solana-system-interface",
"tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tiny-bip39",
"uriparse",
- "url 2.5.4",
+ "url 2.5.7",
]
[[package]]
name = "solana-clap-v3-utils"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"chrono",
@@ -7472,17 +7692,17 @@ dependencies = [
"solana-signature",
"solana-signer",
"solana-system-interface",
- "solana-zk-sdk 3.0.0",
+ "solana-zk-sdk",
"tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tiny-bip39",
"uriparse",
- "url 2.5.4",
+ "url 2.5.7",
]
[[package]]
name = "solana-cli"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"agave-syscalls",
@@ -7490,7 +7710,7 @@ dependencies = [
"bincode",
"bs58",
"clap 2.33.3",
- "console 0.16.0",
+ "console 0.16.1",
"const_format",
"criterion-stats",
"crossbeam-channel",
@@ -7500,8 +7720,8 @@ dependencies = [
"log",
"num-traits",
"pretty-hex",
- "reqwest 0.12.22",
- "semver 1.0.26",
+ "reqwest 0.12.23",
+ "semver 1.0.27",
"serde",
"serde_derive",
"serde_json",
@@ -7518,11 +7738,9 @@ dependencies = [
"solana-commitment-config",
"solana-compute-budget-interface",
"solana-config-interface",
- "solana-config-program-client",
"solana-connection-cache",
"solana-epoch-schedule",
"solana-faucet",
- "solana-feature-gate-client",
"solana-feature-gate-interface",
"solana-fee-calculator",
"solana-fee-structure",
@@ -7573,13 +7791,13 @@ dependencies = [
"spl-memo-interface",
"tempfile",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tiny-bip39",
]
[[package]]
name = "solana-cli-config"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"anyhow",
"dirs-next",
@@ -7588,24 +7806,24 @@ dependencies = [
"serde_yaml 0.9.34+deprecated",
"solana-clap-utils",
"solana-commitment-config",
- "url 2.5.4",
+ "url 2.5.7",
]
[[package]]
name = "solana-cli-output"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"Inflector",
"agave-reserved-account-keys",
"base64 0.22.1",
"chrono",
"clap 2.33.3",
- "console 0.16.0",
- "ed25519-dalek",
+ "console 0.16.1",
+ "ed25519-dalek 1.0.1",
"humantime",
"indicatif 0.18.0",
"pretty-hex",
- "semver 1.0.26",
+ "semver 1.0.27",
"serde",
"serde_json",
"solana-account",
@@ -7618,7 +7836,6 @@ dependencies = [
"solana-hash",
"solana-keypair",
"solana-message",
- "solana-native-token",
"solana-packet",
"solana-pubkey",
"solana-rpc-client-api",
@@ -7627,7 +7844,6 @@ dependencies = [
"solana-signer",
"solana-stake-interface",
"solana-system-interface",
- "solana-sysvar",
"solana-transaction",
"solana-transaction-context",
"solana-transaction-error",
@@ -7639,7 +7855,7 @@ dependencies = [
[[package]]
name = "solana-client"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"async-trait",
"bincode",
@@ -7647,7 +7863,7 @@ dependencies = [
"dashmap",
"futures 0.3.31",
"futures-util",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"indicatif 0.18.0",
"log",
"quinn",
@@ -7679,13 +7895,14 @@ dependencies = [
"solana-transaction-error",
"solana-transaction-status-client-types",
"solana-udp-client",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
+ "tokio-util 0.7.16",
]
[[package]]
name = "solana-client-test"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"futures-util",
"serde_json",
@@ -7722,9 +7939,9 @@ dependencies = [
[[package]]
name = "solana-client-traits"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83f0071874e629f29e0eb3dab8a863e98502ac7aba55b7e0df1803fc5cac72a7"
+checksum = "08618ed587e128105510c54ae3e456b9a06d674d8640db75afe66dad65cb4e02"
dependencies = [
"solana-account",
"solana-commitment-config",
@@ -7743,9 +7960,9 @@ dependencies = [
[[package]]
name = "solana-clock"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bb482ab70fced82ad3d7d3d87be33d466a3498eb8aa856434ff3c0dfc2e2e31"
+checksum = "fb62e9381182459a4520b5fe7fb22d423cae736239a6427fc398a88743d0ed59"
dependencies = [
"serde",
"serde_derive",
@@ -7756,9 +7973,9 @@ dependencies = [
[[package]]
name = "solana-cluster-type"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ace9fea2daa28354d107ea879cff107181d85cd4e0f78a2bedb10e1a428c97e"
+checksum = "eb7692fa6bf10a1a86b450c4775526f56d7e0e2116a53313f2533b5694abea64"
dependencies = [
"serde",
"serde_derive",
@@ -7767,9 +7984,9 @@ dependencies = [
[[package]]
name = "solana-commitment-config"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac49c4dde3edfa832de1697e9bcdb7c3b3f7cb7a1981b7c62526c8bb6700fb73"
+checksum = "5fa5933a62dadb7d3ed35e6329de5cebb0678acc8f9cfdf413269084eeccc63f"
dependencies = [
"serde",
"serde_derive",
@@ -7777,7 +7994,7 @@ dependencies = [
[[package]]
name = "solana-compute-budget"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"qualifier_attr",
"solana-fee-structure",
@@ -7787,7 +8004,7 @@ dependencies = [
[[package]]
name = "solana-compute-budget-instruction"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"bincode",
@@ -7811,30 +8028,30 @@ dependencies = [
"solana-system-interface",
"solana-transaction",
"solana-transaction-error",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-compute-budget-interface"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8432d2c4c22d0499aa06d62e4f7e333f81777b3d7c96050ae9e5cb71a8c3aee4"
+checksum = "8292c436b269ad23cecc8b24f7da3ab07ca111661e25e00ce0e1d22771951ab9"
dependencies = [
- "borsh 1.5.7",
+ "borsh",
"solana-instruction",
"solana-sdk-ids",
]
[[package]]
name = "solana-compute-budget-program"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"solana-program-runtime",
]
[[package]]
name = "solana-compute-budget-program-bench"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"criterion",
@@ -7849,9 +8066,9 @@ dependencies = [
[[package]]
name = "solana-config-interface"
-version = "1.0.0"
+version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fbdbcfedb467322ac9686ca61da0a1fdede2fd99a01fb2ed52b49452abd22e0"
+checksum = "63e401ae56aed512821cc7a0adaa412ff97fecd2dff4602be7b1330d2daec0c4"
dependencies = [
"bincode",
"serde",
@@ -7864,29 +8081,15 @@ dependencies = [
"solana-system-interface",
]
-[[package]]
-name = "solana-config-program-client"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef9867b9ffae6e48a97ce6349e7796fcb34084298e909a8fa1fe427f41b52fd4"
-dependencies = [
- "bincode",
- "borsh 0.10.3",
- "kaigan",
- "serde",
- "solana-config-interface",
- "solana-program",
-]
-
[[package]]
name = "solana-connection-cache"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"async-trait",
"bincode",
"crossbeam-channel",
"futures-util",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"indicatif 0.18.0",
"log",
"rand 0.8.5",
@@ -7899,19 +8102,20 @@ dependencies = [
"solana-net-utils",
"solana-time-utils",
"solana-transaction-error",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
]
[[package]]
name = "solana-core"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-banking-stage-ingress-types",
"agave-feature-set",
"agave-reserved-account-keys",
"agave-transaction-view",
"agave-verified-packet-receiver",
+ "agave-votor",
"ahash 0.8.11",
"anyhow",
"arrayvec",
@@ -7920,6 +8124,7 @@ dependencies = [
"base64 0.22.1",
"bincode",
"bs58",
+ "bytemuck",
"bytes",
"chrono",
"conditional-mod",
@@ -7927,7 +8132,6 @@ dependencies = [
"crossbeam-channel",
"dashmap",
"derive_more 1.0.0",
- "etcd-client",
"fs_extra",
"futures 0.3.31",
"histogram",
@@ -7944,7 +8148,7 @@ dependencies = [
"rand_chacha 0.3.1",
"rayon",
"rolling-file",
- "rustls 0.23.31",
+ "rustls 0.23.32",
"serde",
"serde_bytes",
"serde_derive",
@@ -7960,6 +8164,7 @@ dependencies = [
"solana-builtins-default-costs",
"solana-client",
"solana-clock",
+ "solana-cluster-type",
"solana-compute-budget",
"solana-compute-budget-instruction",
"solana-compute-budget-interface",
@@ -7995,6 +8200,7 @@ dependencies = [
"solana-perf",
"solana-poh",
"solana-poh-config",
+ "solana-program-binaries",
"solana-program-runtime",
"solana-pubkey",
"solana-quic-client",
@@ -8018,13 +8224,13 @@ dependencies = [
"solana-stake-program",
"solana-streamer",
"solana-svm",
+ "solana-svm-timings",
"solana-svm-transaction",
"solana-system-interface",
"solana-system-program",
"solana-system-transaction",
"solana-sysvar",
"solana-time-utils",
- "solana-timings",
"solana-tls-utils",
"solana-tpu-client",
"solana-tpu-client-next",
@@ -8047,7 +8253,7 @@ dependencies = [
"sysctl",
"tempfile",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tikv-jemallocator",
"tokio",
"tokio-util 0.7.16",
@@ -8056,7 +8262,7 @@ dependencies = [
[[package]]
name = "solana-cost-model"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"agave-reserved-account-keys",
@@ -8102,12 +8308,12 @@ dependencies = [
[[package]]
name = "solana-cpi"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8dc71126edddc2ba014622fc32d0f5e2e78ec6c5a1e0eb511b85618c09e9ea11"
+checksum = "16238feb63d1cbdf915fb287f29ef7a7ebf81469bd6214f8b72a53866b593f8f"
dependencies = [
"solana-account-info",
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
"solana-instruction",
"solana-program-error",
"solana-pubkey",
@@ -8116,50 +8322,47 @@ dependencies = [
[[package]]
name = "solana-curve25519"
-version = "2.2.15"
+version = "2.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "def3cfe5279edb64fc39111cff6dcf77b01fbfba2c02c13ced41e6a48baf4cbe"
+checksum = "b162f50499b391b785d57b2f2c73e3b9754d88fd4894bef444960b00bda8dcca"
dependencies = [
"bytemuck",
"bytemuck_derive",
"curve25519-dalek 4.1.3",
- "solana-define-syscall",
+ "solana-define-syscall 2.3.0",
"subtle",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-curve25519"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bytemuck",
"bytemuck_derive",
"curve25519-dalek 4.1.3",
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
"subtle",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
-name = "solana-decode-error"
-version = "2.2.1"
+name = "solana-define-syscall"
+version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10a6a6383af236708048f8bd8d03db8ca4ff7baf4a48e5d580f4cce545925470"
-dependencies = [
- "num-traits",
-]
+checksum = "2ae3e2abcf541c8122eafe9a625d4d194b4023c20adde1e251f94e056bb1aee2"
[[package]]
name = "solana-define-syscall"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ae3e2abcf541c8122eafe9a625d4d194b4023c20adde1e251f94e056bb1aee2"
+checksum = "f9697086a4e102d28a156b8d6b521730335d6951bd39a5e766512bbe09007cee"
[[package]]
name = "solana-derivation-path"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "939756d798b25c5ec3cca10e06212bdca3b1443cb9bb740a38124f58b258737b"
+checksum = "ff71743072690fdbdfcdc37700ae1cb77485aaad49019473a81aee099b1e0b8c"
dependencies = [
"derivation-path",
"qstring",
@@ -8168,7 +8371,7 @@ dependencies = [
[[package]]
name = "solana-dos"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bincode",
"clap 3.2.23",
@@ -8211,7 +8414,7 @@ dependencies = [
[[package]]
name = "solana-download-utils"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"log",
"solana-clock",
@@ -8222,25 +8425,22 @@ dependencies = [
[[package]]
name = "solana-ed25519-program"
-version = "2.2.3"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1feafa1691ea3ae588f99056f4bdd1293212c7ece28243d7da257c443e84753"
+checksum = "e1419197f1c06abf760043f6d64ba9d79a03ad5a43f18c7586471937122094da"
dependencies = [
"bytemuck",
"bytemuck_derive",
- "ed25519-dalek",
- "solana-feature-set",
"solana-instruction",
- "solana-precompile-error",
"solana-sdk-ids",
]
[[package]]
name = "solana-ed25519-program-tests"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
- "ed25519-dalek",
+ "ed25519-dalek 1.0.1",
"rand 0.8.5",
"solana-ed25519-program",
"solana-instruction",
@@ -8253,7 +8453,7 @@ dependencies = [
[[package]]
name = "solana-entry"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-reserved-account-keys",
"assert_matches",
@@ -8287,9 +8487,9 @@ dependencies = [
[[package]]
name = "solana-epoch-info"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90ef6f0b449290b0b9f32973eefd95af35b01c5c0c34c569f936c34c5b20d77b"
+checksum = "f8a6b69bd71386f61344f2bcf0f527f5fd6dd3b22add5880e2e1bf1dd1fa8059"
dependencies = [
"serde",
"serde_derive",
@@ -8297,9 +8497,9 @@ dependencies = [
[[package]]
name = "solana-epoch-rewards"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86b575d3dd323b9ea10bb6fe89bf6bf93e249b215ba8ed7f68f1a3633f384db7"
+checksum = "b319a4ed70390af911090c020571f0ff1f4ec432522d05ab89f5c08080381995"
dependencies = [
"serde",
"serde_derive",
@@ -8311,9 +8511,9 @@ dependencies = [
[[package]]
name = "solana-epoch-rewards-hasher"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96c5fd2662ae7574810904585fd443545ed2b568dbd304b25a31e79ccc76e81b"
+checksum = "e507099d0c2c5d7870c9b1848281ea67bbeee80d171ca85003ee5767994c9c38"
dependencies = [
"siphasher 0.3.11",
"solana-hash",
@@ -8322,9 +8522,9 @@ dependencies = [
[[package]]
name = "solana-epoch-schedule"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fce071fbddecc55d727b1d7ed16a629afe4f6e4c217bc8d00af3b785f6f67ed"
+checksum = "6e5481e72cc4d52c169db73e4c0cd16de8bc943078aac587ec4817a75cc6388f"
dependencies = [
"serde",
"serde_derive",
@@ -8335,11 +8535,21 @@ dependencies = [
"solana-sysvar-id",
]
+[[package]]
+name = "solana-epoch-stake"
+version = "3.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcc6693d0ea833b880514b9b88d95afb80b42762dca98b0712465d1fcbbcb89e"
+dependencies = [
+ "solana-define-syscall 3.0.0",
+ "solana-pubkey",
+]
+
[[package]]
name = "solana-example-mocks"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84461d56cbb8bb8d539347151e0525b53910102e4bced875d49d5139708e39d3"
+checksum = "978855d164845c1b0235d4b4d101cadc55373fffaf0b5b6cfa2194d25b2ed658"
dependencies = [
"serde",
"serde_derive",
@@ -8353,12 +8563,12 @@ dependencies = [
"solana-pubkey",
"solana-sdk-ids",
"solana-system-interface",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-faucet"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bincode",
"clap 2.33.3",
@@ -8368,13 +8578,15 @@ dependencies = [
"serde_derive",
"solana-clap-utils",
"solana-cli-config",
+ "solana-cli-output",
+ "solana-faucet",
"solana-hash",
"solana-instruction",
"solana-keypair",
"solana-logger",
"solana-message",
"solana-metrics",
- "solana-native-token",
+ "solana-net-utils",
"solana-packet",
"solana-pubkey",
"solana-signer",
@@ -8383,28 +8595,15 @@ dependencies = [
"solana-transaction",
"solana-version",
"spl-memo-interface",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
]
-[[package]]
-name = "solana-feature-gate-client"
-version = "0.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1056507c534839b5cd1b1010ffedb9e8c92313269786fb5066ff53b30326dc3"
-dependencies = [
- "borsh 0.10.3",
- "num-derive",
- "num-traits",
- "solana-program",
- "thiserror 2.0.12",
-]
-
[[package]]
name = "solana-feature-gate-interface"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43f5c5382b449e8e4e3016fb05e418c53d57782d8b5c30aa372fc265654b956d"
+checksum = "7347ab62e6d47a82e340c865133795b394feea7c2b2771d293f57691c6544c3f"
dependencies = [
"bincode",
"serde",
@@ -8419,23 +8618,9 @@ dependencies = [
"solana-system-interface",
]
-[[package]]
-name = "solana-feature-set"
-version = "2.2.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92f6c09cc41059c0e03ccbee7f5d4cc0a315d68ef0d59b67eb90246adfd8cc35"
-dependencies = [
- "ahash 0.8.11",
- "lazy_static",
- "solana-epoch-schedule",
- "solana-hash",
- "solana-pubkey",
- "solana-sha256-hasher",
-]
-
[[package]]
name = "solana-fee"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"solana-fee-structure",
@@ -8444,9 +8629,9 @@ dependencies = [
[[package]]
name = "solana-fee-calculator"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d89bc408da0fb3812bc3008189d148b4d3e08252c79ad810b245482a3f70cd8d"
+checksum = "2a73cc03ca4bed871ca174558108835f8323e85917bb38b9c81c7af2ab853efe"
dependencies = [
"log",
"serde",
@@ -8457,22 +8642,20 @@ dependencies = [
[[package]]
name = "solana-fee-structure"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33adf673581c38e810bf618f745bf31b683a0a4a4377682e6aaac5d9a058dd4e"
+checksum = "5e2abdb1223eea8ec64136f39cb1ffcf257e00f915c957c35c0dd9e3f4e700b0"
dependencies = [
"serde",
"serde_derive",
"solana-frozen-abi",
- "solana-message",
- "solana-native-token",
]
[[package]]
name = "solana-file-download"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05a9744774fdbd7ae8575e5bd6d5df6946f321fb9b6019410b300a515369a37d"
+checksum = "842227f0ae5ebffdfe686597a909cb406d2bd9b92432c516503b8cbd490a3ea6"
dependencies = [
"console 0.15.11",
"indicatif 0.17.12",
@@ -8482,9 +8665,9 @@ dependencies = [
[[package]]
name = "solana-frozen-abi"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ac93e831736e9cbd1571c5c692fa7533a304f184f77cba52e5b83c4c7eeebda"
+checksum = "f19aad3b79cf84cd24de85e711ed1718de1e5bf46a710fa73179efa6a117d707"
dependencies = [
"boxcar",
"bs58",
@@ -8499,23 +8682,23 @@ dependencies = [
"serde_with",
"sha2 0.10.9",
"solana-frozen-abi-macro",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-frozen-abi-macro"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b83f88a126213cbcb57672c5e70ddb9791eff9b480e9f39fe9285fd2abca66fa"
+checksum = "d42809b90c84963eb5f2e17afafb1384892341b0d8ec12ae8f4a8c69a96138e4"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "solana-genesis"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"base64 0.22.1",
@@ -8531,6 +8714,7 @@ dependencies = [
"solana-clap-utils",
"solana-cli-config",
"solana-clock",
+ "solana-cluster-type",
"solana-commitment-config",
"solana-entry",
"solana-epoch-schedule",
@@ -8561,9 +8745,9 @@ dependencies = [
[[package]]
name = "solana-genesis-config"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3725085d47b96d37fef07a29d78d2787fc89a0b9004c66eed7753d1e554989f"
+checksum = "749eccc960e85c9b33608450093d256006253e1cb436b8380e71777840a3f675"
dependencies = [
"bincode",
"chrono",
@@ -8578,7 +8762,6 @@ dependencies = [
"solana-hash",
"solana-inflation",
"solana-keypair",
- "solana-logger",
"solana-poh-config",
"solana-pubkey",
"solana-rent",
@@ -8591,7 +8774,7 @@ dependencies = [
[[package]]
name = "solana-genesis-utils"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"log",
"solana-accounts-db",
@@ -8603,7 +8786,7 @@ dependencies = [
[[package]]
name = "solana-geyser-plugin-manager"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-geyser-plugin-interface",
"bs58",
@@ -8627,15 +8810,16 @@ dependencies = [
"solana-signature",
"solana-transaction",
"solana-transaction-status",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
]
[[package]]
name = "solana-gossip"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
+ "agave-low-pass-filter",
"anyhow",
"arrayvec",
"assert_matches",
@@ -8646,7 +8830,7 @@ dependencies = [
"criterion",
"crossbeam-channel",
"flate2",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itertools 0.12.1",
"log",
"lru",
@@ -8663,10 +8847,12 @@ dependencies = [
"serde_derive",
"serial_test",
"siphasher 1.0.1",
+ "solana-account",
"solana-bloom",
"solana-clap-utils",
"solana-client",
"solana-clock",
+ "solana-cluster-type",
"solana-connection-cache",
"solana-entry",
"solana-epoch-schedule",
@@ -8704,14 +8890,14 @@ dependencies = [
"solana-vote-program",
"static_assertions",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-hard-forks"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6c28371f878e2ead55611d8ba1b5fb879847156d04edea13693700ad1a28baf"
+checksum = "0abacc4b66ce471f135f48f22facf75cbbb0f8a252fbe2c1e0aa59d5b203f519"
dependencies = [
"serde",
"serde_derive",
@@ -8721,29 +8907,27 @@ dependencies = [
[[package]]
name = "solana-hash"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5b96e9f0300fa287b545613f007dfe20043d7812bee255f418c1eb649c93b63"
+checksum = "8a063723b9e84c14d8c0d2cdf0268207dc7adecf546e31251f9e07c7b00b566c"
dependencies = [
- "borsh 1.5.7",
+ "borsh",
"bytemuck",
"bytemuck_derive",
"five8",
- "js-sys",
"serde",
"serde_derive",
"solana-atomic-u64",
"solana-frozen-abi",
"solana-frozen-abi-macro",
"solana-sanitize",
- "wasm-bindgen",
]
[[package]]
name = "solana-inflation"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23eef6a09eb8e568ce6839573e4966850e85e9ce71e6ae1a6c930c1c43947de3"
+checksum = "e92f37a14e7c660628752833250dd3dcd8e95309876aee751d7f8769a27947c6"
dependencies = [
"serde",
"serde_derive",
@@ -8753,33 +8937,45 @@ dependencies = [
[[package]]
name = "solana-instruction"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47298e2ce82876b64f71e9d13a46bc4b9056194e7f9937ad3084385befa50885"
+checksum = "8df4e8fcba01d7efa647ed20a081c234475df5e11a93acb4393cc2c9a7b99bab"
dependencies = [
"bincode",
- "borsh 1.5.7",
- "getrandom 0.2.15",
- "js-sys",
- "num-traits",
+ "borsh",
"serde",
"serde_derive",
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
"solana-frozen-abi",
"solana-frozen-abi-macro",
+ "solana-instruction-error",
"solana-pubkey",
- "wasm-bindgen",
+]
+
+[[package]]
+name = "solana-instruction-error"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b1f0d483b8ae387178d9210e0575b666b05cdd4bd0f2f188128249f6e454d39d"
+dependencies = [
+ "num-traits",
+ "serde",
+ "serde_derive",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
+ "solana-program-error",
]
[[package]]
name = "solana-instructions-sysvar"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0e85a6fad5c2d0c4f5b91d34b8ca47118fc593af706e523cdbedf846a954f57"
+checksum = "7ddf67876c541aa1e21ee1acae35c95c6fbc61119814bfef70579317a5e26955"
dependencies = [
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"solana-account-info",
"solana-instruction",
+ "solana-instruction-error",
"solana-program-error",
"solana-pubkey",
"solana-sanitize",
@@ -8790,19 +8986,18 @@ dependencies = [
[[package]]
name = "solana-keccak-hasher"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7aeb957fbd42a451b99235df4942d96db7ef678e8d5061ef34c9b34cae12f79"
+checksum = "57eebd3012946913c8c1b8b43cdf8a6249edb09c0b6be3604ae910332a3acd97"
dependencies = [
"sha3",
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
"solana-hash",
- "solana-sanitize",
]
[[package]]
name = "solana-keygen"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bs58",
"clap 3.2.23",
@@ -8826,28 +9021,27 @@ dependencies = [
[[package]]
name = "solana-keypair"
-version = "2.2.1"
+version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3dbb7042c2e0c561afa07242b2099d55c57bd1b1da3b6476932197d84e15e3e4"
+checksum = "952ed9074c12edd2060cb09c2a8c664303f4ab7f7056a407ac37dd1da7bdaa3e"
dependencies = [
- "bs58",
- "ed25519-dalek",
+ "ed25519-dalek 2.2.0",
"ed25519-dalek-bip32",
- "rand 0.7.3",
+ "five8",
+ "rand 0.8.5",
"solana-derivation-path",
"solana-pubkey",
"solana-seed-derivable",
"solana-seed-phrase",
"solana-signature",
"solana-signer",
- "wasm-bindgen",
]
[[package]]
name = "solana-last-restart-slot"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a6360ac2fdc72e7463565cd256eedcf10d7ef0c28a1249d261ec168c1b55cdd"
+checksum = "dcda154ec827f5fc1e4da0af3417951b7e9b8157540f81f936c4a8b1156134d0"
dependencies = [
"serde",
"serde_derive",
@@ -8858,7 +9052,7 @@ dependencies = [
[[package]]
name = "solana-lattice-hash"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"base64 0.22.1",
"blake3",
@@ -8871,18 +9065,20 @@ dependencies = [
[[package]]
name = "solana-ledger"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"agave-reserved-account-keys",
"anyhow",
"assert_matches",
"bincode",
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"bs58",
+ "bytes",
"bzip2",
"chrono",
"chrono-humanize",
+ "conditional-mod",
"criterion",
"crossbeam-channel",
"dashmap",
@@ -8931,6 +9127,7 @@ dependencies = [
"solana-metrics",
"solana-native-token",
"solana-net-utils",
+ "solana-nohash-hasher",
"solana-packet",
"solana-perf",
"solana-program-option",
@@ -8951,11 +9148,11 @@ dependencies = [
"solana-storage-proto",
"solana-streamer",
"solana-svm",
+ "solana-svm-timings",
"solana-svm-transaction",
"solana-system-interface",
"solana-system-transaction",
"solana-time-utils",
- "solana-timings",
"solana-transaction",
"solana-transaction-context",
"solana-transaction-error",
@@ -8970,7 +9167,7 @@ dependencies = [
"tar",
"tempfile",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
"tokio-stream",
"trees",
@@ -8978,9 +9175,9 @@ dependencies = [
[[package]]
name = "solana-loader-v2-interface"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8ab08006dad78ae7cd30df8eea0539e207d08d91eaefb3e1d49a446e1c49654"
+checksum = "1e4a6f0ad4fd9c30679bfee2ce3ea6a449cac38049f210480b751f65676dfe82"
dependencies = [
"serde",
"serde_bytes",
@@ -8992,9 +9189,9 @@ dependencies = [
[[package]]
name = "solana-loader-v3-interface"
-version = "5.0.0"
+version = "6.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f7162a05b8b0773156b443bccd674ea78bb9aa406325b467ea78c06c99a63a2"
+checksum = "dee44c9b1328c5c712c68966fb8de07b47f3e7bac006e74ddd1bb053d3e46e5d"
dependencies = [
"serde",
"serde_bytes",
@@ -9007,9 +9204,9 @@ dependencies = [
[[package]]
name = "solana-loader-v4-interface"
-version = "2.2.1"
+version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "706a777242f1f39a83e2a96a2a6cb034cb41169c6ecbee2cf09cb873d9659e7e"
+checksum = "e4c948b33ff81fa89699911b207059e493defdba9647eaf18f23abdf3674e0fb"
dependencies = [
"serde",
"serde_bytes",
@@ -9022,7 +9219,7 @@ dependencies = [
[[package]]
name = "solana-loader-v4-program"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bincode",
"log",
@@ -9034,21 +9231,21 @@ dependencies = [
"solana-instruction",
"solana-loader-v3-interface",
"solana-loader-v4-interface",
- "solana-log-collector",
- "solana-measure",
"solana-packet",
"solana-program-runtime",
"solana-pubkey",
"solana-sbpf",
"solana-sdk-ids",
+ "solana-svm-log-collector",
+ "solana-svm-measure",
+ "solana-svm-type-overrides",
"solana-sysvar",
"solana-transaction-context",
- "solana-type-overrides",
]
[[package]]
name = "solana-local-cluster"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"crossbeam-channel",
@@ -9064,6 +9261,7 @@ dependencies = [
"solana-client",
"solana-client-traits",
"solana-clock",
+ "solana-cluster-type",
"solana-commitment-config",
"solana-core",
"solana-download-utils",
@@ -9081,9 +9279,11 @@ dependencies = [
"solana-native-token",
"solana-net-utils",
"solana-poh-config",
+ "solana-program-binaries",
"solana-pubkey",
"solana-pubsub-client",
"solana-quic-client",
+ "solana-rent",
"solana-rpc-client",
"solana-rpc-client-api",
"solana-runtime",
@@ -9111,33 +9311,13 @@ dependencies = [
"trees",
]
-[[package]]
-name = "solana-log-analyzer"
-version = "3.0.0"
-dependencies = [
- "byte-unit",
- "clap 3.2.23",
- "serde",
- "serde_derive",
- "serde_json",
- "solana-logger",
- "solana-version",
-]
-
-[[package]]
-name = "solana-log-collector"
-version = "3.0.0"
-dependencies = [
- "log",
-]
-
[[package]]
name = "solana-logger"
-version = "2.3.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db8e777ec1afd733939b532a42492d888ec7c88d8b4127a5d867eb45c6eb5cd5"
+checksum = "ef7421d1092680d72065edbf5c7605856719b021bf5f173656c71febcdd5d003"
dependencies = [
- "env_logger 0.9.3",
+ "env_logger",
"lazy_static",
"libc",
"log",
@@ -9146,15 +9326,15 @@ dependencies = [
[[package]]
name = "solana-measure"
-version = "3.0.0"
+version = "3.1.0"
[[package]]
name = "solana-memory-management"
-version = "3.0.0"
+version = "3.1.0"
[[package]]
name = "solana-merkle-tree"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"fast-math",
"hex",
@@ -9164,79 +9344,66 @@ dependencies = [
[[package]]
name = "solana-message"
-version = "2.4.0"
+version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1796aabce376ff74bf89b78d268fa5e683d7d7a96a0a4e4813ec34de49d5314b"
+checksum = "85666605c9fd727f865ed381665db0a8fc29f984a030ecc1e40f43bfb2541623"
dependencies = [
"bincode",
"blake3",
"lazy_static",
"serde",
"serde_derive",
- "solana-bincode",
+ "solana-address",
"solana-hash",
"solana-instruction",
- "solana-pubkey",
"solana-sanitize",
"solana-sdk-ids",
"solana-short-vec",
- "solana-system-interface",
"solana-transaction-error",
- "wasm-bindgen",
]
[[package]]
name = "solana-metrics"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bencher",
"crossbeam-channel",
- "env_logger 0.11.8",
+ "env_logger",
"gethostname",
"log",
"rand 0.8.5",
- "reqwest 0.12.22",
+ "reqwest 0.12.23",
"serial_test",
"solana-cluster-type",
"solana-sha256-hasher",
"solana-time-utils",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-msg"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f36a1a14399afaabc2781a1db09cb14ee4cc4ee5c7a5a3cfcc601811379a8092"
+checksum = "264275c556ea7e22b9d3f87d56305546a38d4eee8ec884f3b126236cb7dcbbb4"
dependencies = [
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
]
[[package]]
name = "solana-native-token"
-version = "2.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "307fb2f78060995979e9b4f68f833623565ed4e55d3725f100454ce78a99a1a3"
-
-[[package]]
-name = "solana-net-shaper"
version = "3.0.0"
-dependencies = [
- "clap 3.2.23",
- "rand 0.8.5",
- "serde",
- "serde_derive",
- "serde_json",
- "solana-logger",
-]
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae8dd4c280dca9d046139eb5b7a5ac9ad10403fbd64964c7d7571214950d758f"
[[package]]
name = "solana-net-utils"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"anyhow",
"bincode",
"bytes",
+ "cfg-if 1.0.3",
+ "dashmap",
"hxdmp",
"itertools 0.12.1",
"log",
@@ -9245,11 +9412,13 @@ dependencies = [
"rand 0.8.5",
"serde",
"serde_derive",
+ "shuttle",
"socket2 0.6.0",
"solana-logger",
"solana-serde",
+ "solana-svm-type-overrides",
"tokio",
- "url 2.5.4",
+ "url 2.5.7",
]
[[package]]
@@ -9260,9 +9429,9 @@ checksum = "8b8a731ed60e89177c8a7ab05fe0f1511cedd3e70e773f288f9de33a9cfdc21e"
[[package]]
name = "solana-nonce"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "703e22eb185537e06204a5bd9d509b948f0066f2d1d814a6f475dafb3ddf1325"
+checksum = "abbdc6c8caf1c08db9f36a50967539d0f72b9f1d4aea04fec5430f532e5afadc"
dependencies = [
"serde",
"serde_derive",
@@ -9274,9 +9443,9 @@ dependencies = [
[[package]]
name = "solana-nonce-account"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cde971a20b8dbf60144d6a84439dda86b5466e00e2843091fe731083cda614da"
+checksum = "805fd25b29e5a1a0e6c3dd6320c9da80f275fbe4ff6e392617c303a2085c435e"
dependencies = [
"solana-account",
"solana-hash",
@@ -9286,19 +9455,19 @@ dependencies = [
[[package]]
name = "solana-notifier"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"log",
- "reqwest 0.12.22",
+ "reqwest 0.12.23",
"serde_json",
"solana-hash",
]
[[package]]
name = "solana-offchain-message"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b526398ade5dea37f1f147ce55dae49aa017a5d7326606359b0445ca8d946581"
+checksum = "f6e2a1141a673f72a05cf406b99e4b2b8a457792b7c01afa07b3f00d4e2de393"
dependencies = [
"num_enum",
"solana-hash",
@@ -9312,12 +9481,12 @@ dependencies = [
[[package]]
name = "solana-packet"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "004f2d2daf407b3ec1a1ca5ec34b3ccdfd6866dd2d3c7d0715004a96e4b6d127"
+checksum = "6edf2f25743c95229ac0fdc32f8f5893ef738dbf332c669e9861d33ddb0f469d"
dependencies = [
"bincode",
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"cfg_eval",
"serde",
"serde_derive",
@@ -9328,7 +9497,7 @@ dependencies = [
[[package]]
name = "solana-perf"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"ahash 0.8.11",
"assert_matches",
@@ -9375,7 +9544,7 @@ dependencies = [
[[package]]
name = "solana-poh"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"arc-swap",
"assert_matches",
@@ -9404,13 +9573,13 @@ dependencies = [
"solana-system-transaction",
"solana-time-utils",
"solana-transaction",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tikv-jemallocator",
]
[[package]]
name = "solana-poh-bench"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"clap 3.2.23",
"log",
@@ -9426,9 +9595,9 @@ dependencies = [
[[package]]
name = "solana-poh-config"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d650c3b4b9060082ac6b0efbbb66865089c58405bfb45de449f3f2b91eccee75"
+checksum = "2f1fef1f2ff2480fdbcc64bef5e3c47bec6e1647270db88b43f23e3a55f8d9cf"
dependencies = [
"serde",
"serde_derive",
@@ -9436,46 +9605,28 @@ dependencies = [
[[package]]
name = "solana-poseidon"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"ark-bn254",
"light-poseidon",
- "solana-define-syscall",
- "thiserror 2.0.12",
+ "solana-define-syscall 3.0.0",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-precompile-error"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d87b2c1f5de77dfe2b175ee8dd318d196aaca4d0f66f02842f80c852811f9f8"
+checksum = "cafcd950de74c6c39d55dc8ca108bbb007799842ab370ef26cf45a34453c31e1"
dependencies = [
"num-traits",
- "solana-decode-error",
]
[[package]]
-name = "solana-precompiles"
-version = "2.2.1"
+name = "solana-presigner"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a460ab805ec063802105b463ecb5eb02c3ffe469e67a967eea8a6e778e0bc06"
-dependencies = [
- "lazy_static",
- "solana-ed25519-program",
- "solana-feature-set",
- "solana-message",
- "solana-precompile-error",
- "solana-pubkey",
- "solana-sdk-ids",
- "solana-secp256k1-program",
- "solana-secp256r1-program",
-]
-
-[[package]]
-name = "solana-presigner"
-version = "2.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81a57a24e6a4125fc69510b6774cd93402b943191b6cddad05de7281491c90fe"
+checksum = "0f704eaf825be3180832445b9e4983b875340696e8e7239bf2d535b0f86c14a2"
dependencies = [
"solana-pubkey",
"solana-signature",
@@ -9484,57 +9635,30 @@ dependencies = [
[[package]]
name = "solana-program"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98eca145bd3545e2fbb07166e895370576e47a00a7d824e325390d33bf467210"
+checksum = "91b12305dd81045d705f427acd0435a2e46444b65367d7179d7bdcfc3bc5f5eb"
dependencies = [
- "bincode",
- "blake3",
- "borsh 0.10.3",
- "borsh 1.5.7",
- "bs58",
- "bytemuck",
- "console_error_panic_hook",
- "console_log",
- "getrandom 0.2.15",
- "lazy_static",
- "log",
"memoffset 0.9.1",
- "num-bigint 0.4.6",
- "num-derive",
- "num-traits",
- "rand 0.8.5",
- "serde",
- "serde_bytes",
- "serde_derive",
"solana-account-info",
- "solana-address-lookup-table-interface",
- "solana-atomic-u64",
"solana-big-mod-exp",
- "solana-bincode",
"solana-blake3-hasher",
- "solana-borsh",
"solana-clock",
"solana-cpi",
- "solana-decode-error",
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
"solana-epoch-rewards",
"solana-epoch-schedule",
+ "solana-epoch-stake",
"solana-example-mocks",
- "solana-feature-gate-interface",
"solana-fee-calculator",
"solana-hash",
"solana-instruction",
+ "solana-instruction-error",
"solana-instructions-sysvar",
"solana-keccak-hasher",
"solana-last-restart-slot",
- "solana-loader-v2-interface",
- "solana-loader-v3-interface",
- "solana-loader-v4-interface",
- "solana-message",
"solana-msg",
"solana-native-token",
- "solana-nonce",
"solana-program-entrypoint",
"solana-program-error",
"solana-program-memory",
@@ -9542,9 +9666,7 @@ dependencies = [
"solana-program-pack",
"solana-pubkey",
"solana-rent",
- "solana-sanitize",
"solana-sdk-ids",
- "solana-sdk-macro",
"solana-secp256k1-recover",
"solana-serde-varint",
"solana-serialize-utils",
@@ -9553,22 +9675,32 @@ dependencies = [
"solana-slot-hashes",
"solana-slot-history",
"solana-stable-layout",
- "solana-stake-interface",
- "solana-system-interface",
"solana-sysvar",
"solana-sysvar-id",
- "solana-vote-interface",
- "thiserror 2.0.12",
- "wasm-bindgen",
+]
+
+[[package]]
+name = "solana-program-binaries"
+version = "3.1.0"
+dependencies = [
+ "bincode",
+ "serde",
+ "solana-account",
+ "solana-loader-v3-interface",
+ "solana-pubkey",
+ "solana-rent",
+ "solana-sdk-ids",
+ "spl-generic-token",
]
[[package]]
name = "solana-program-entrypoint"
-version = "2.3.0"
+version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32ce041b1a0ed275290a5008ee1a4a6c48f5054c8a3d78d313c08958a06aedbd"
+checksum = "6557cf5b5e91745d1667447438a1baa7823c6086e4ece67f8e6ebfa7a8f72660"
dependencies = [
"solana-account-info",
+ "solana-define-syscall 3.0.0",
"solana-msg",
"solana-program-error",
"solana-pubkey",
@@ -9576,52 +9708,46 @@ dependencies = [
[[package]]
name = "solana-program-error"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ee2e0217d642e2ea4bee237f37bd61bb02aec60da3647c48ff88f6556ade775"
+checksum = "a1af32c995a7b692a915bb7414d5f8e838450cf7c70414e763d8abcae7b51f28"
dependencies = [
- "borsh 1.5.7",
- "num-traits",
+ "borsh",
"serde",
"serde_derive",
- "solana-decode-error",
- "solana-instruction",
- "solana-msg",
- "solana-pubkey",
]
[[package]]
name = "solana-program-memory"
-version = "2.3.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a5426090c6f3fd6cfdc10685322fede9ca8e5af43cd6a59e98bfe4e91671712"
+checksum = "10e5660c60749c7bfb30b447542529758e4dbcecd31b1e8af1fdc92e2bdde90a"
dependencies = [
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
]
[[package]]
name = "solana-program-option"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc677a2e9bc616eda6dbdab834d463372b92848b2bfe4a1ed4e4b4adba3397d0"
+checksum = "8e7b4ddb464f274deb4a497712664c3b612e3f5f82471d4e47710fc4ab1c3095"
[[package]]
name = "solana-program-pack"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "319f0ef15e6e12dc37c597faccb7d62525a509fec5f6975ecb9419efddeb277b"
+checksum = "c169359de21f6034a63ebf96d6b380980307df17a8d371344ff04a883ec4e9d0"
dependencies = [
"solana-program-error",
]
[[package]]
name = "solana-program-runtime"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"base64 0.22.1",
"bincode",
- "enum-iterator",
"itertools 0.12.1",
"log",
"percentage",
@@ -9637,34 +9763,39 @@ dependencies = [
"solana-frozen-abi-macro",
"solana-hash",
"solana-instruction",
+ "solana-instruction-error",
+ "solana-keypair",
"solana-last-restart-slot",
- "solana-log-collector",
- "solana-measure",
- "solana-metrics",
+ "solana-loader-v3-interface",
"solana-program-entrypoint",
"solana-program-runtime",
"solana-pubkey",
"solana-rent",
"solana-sbpf",
"solana-sdk-ids",
+ "solana-signer",
"solana-slot-hashes",
"solana-stable-layout",
+ "solana-stake-interface",
"solana-svm-callback",
"solana-svm-feature-set",
+ "solana-svm-log-collector",
+ "solana-svm-measure",
+ "solana-svm-timings",
"solana-svm-transaction",
+ "solana-svm-type-overrides",
"solana-system-interface",
"solana-sysvar",
"solana-sysvar-id",
- "solana-timings",
+ "solana-transaction",
"solana-transaction-context",
- "solana-type-overrides",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-program-test"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"assert_matches",
@@ -9682,6 +9813,7 @@ dependencies = [
"solana-banks-interface",
"solana-banks-server",
"solana-clock",
+ "solana-cluster-type",
"solana-commitment-config",
"solana-compute-budget",
"solana-cpi",
@@ -9693,13 +9825,13 @@ dependencies = [
"solana-instruction",
"solana-keypair",
"solana-loader-v3-interface",
- "solana-log-collector",
"solana-logger",
"solana-message",
"solana-msg",
"solana-native-token",
"solana-poh-config",
"solana-program",
+ "solana-program-binaries",
"solana-program-entrypoint",
"solana-program-error",
"solana-program-runtime",
@@ -9713,60 +9845,41 @@ dependencies = [
"solana-stake-interface",
"solana-stake-program",
"solana-svm",
+ "solana-svm-log-collector",
+ "solana-svm-timings",
"solana-system-interface",
"solana-sysvar",
"solana-sysvar-id",
- "solana-timings",
"solana-transaction",
"solana-transaction-context",
"solana-transaction-error",
"solana-vote-program",
"spl-generic-token",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
]
[[package]]
name = "solana-pubkey"
-version = "2.4.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b62adb9c3261a052ca1f999398c388f1daf558a1b492f60a6d9e64857db4ff1"
+checksum = "8909d399deb0851aa524420beeb5646b115fd253ef446e35fe4504c904da3941"
dependencies = [
- "arbitrary",
- "borsh 0.10.3",
- "borsh 1.5.7",
- "bytemuck",
- "bytemuck_derive",
- "curve25519-dalek 4.1.3",
- "five8",
- "five8_const",
- "getrandom 0.2.15",
- "js-sys",
- "num-traits",
"rand 0.8.5",
- "serde",
- "serde_derive",
- "solana-atomic-u64",
- "solana-decode-error",
- "solana-define-syscall",
- "solana-frozen-abi",
- "solana-frozen-abi-macro",
- "solana-sanitize",
- "solana-sha256-hasher",
- "wasm-bindgen",
+ "solana-address",
]
[[package]]
name = "solana-pubsub-client"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"anyhow",
"crossbeam-channel",
"futures-util",
"http 0.2.12",
"log",
- "semver 1.0.26",
+ "semver 1.0.27",
"serde",
"serde_derive",
"serde_json",
@@ -9776,17 +9889,17 @@ dependencies = [
"solana-pubkey",
"solana-rpc-client-types",
"solana-signature",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
"tokio-stream",
"tokio-tungstenite",
"tungstenite",
- "url 2.5.4",
+ "url 2.5.7",
]
[[package]]
name = "solana-quic-client"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"async-lock",
"async-trait",
@@ -9796,7 +9909,7 @@ dependencies = [
"log",
"quinn",
"quinn-proto",
- "rustls 0.23.31",
+ "rustls 0.23.32",
"solana-connection-cache",
"solana-keypair",
"solana-logger",
@@ -9812,22 +9925,23 @@ dependencies = [
"solana-streamer",
"solana-tls-utils",
"solana-transaction-error",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
+ "tokio-util 0.7.16",
]
[[package]]
name = "solana-quic-definitions"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7011ee2af2baad991762b6d63ea94b08d06f7928effb76ce273b232c9902c205"
+checksum = "15319accf7d3afd845817aeffa6edd8cc185f135cefbc6b985df29cfd8c09609"
dependencies = [
"solana-keypair",
]
[[package]]
name = "solana-rayon-threadlimit"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"log",
"num_cpus",
@@ -9835,10 +9949,10 @@ dependencies = [
[[package]]
name = "solana-remote-wallet"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
- "console 0.16.0",
+ "console 0.16.1",
"dialoguer",
"hidapi",
"log",
@@ -9846,21 +9960,21 @@ dependencies = [
"num-traits",
"parking_lot 0.12.3",
"qstring",
- "semver 1.0.26",
+ "semver 1.0.27",
"solana-derivation-path",
"solana-offchain-message",
"solana-pubkey",
"solana-signature",
"solana-signer",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"uriparse",
]
[[package]]
name = "solana-rent"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1aea8fdea9de98ca6e8c2da5827707fb3842833521b528a713810ca685d2480"
+checksum = "b702d8c43711e3c8a9284a4f1bbc6a3de2553deb25b0c8142f9a44ef0ce5ddc1"
dependencies = [
"serde",
"serde_derive",
@@ -9871,30 +9985,11 @@ dependencies = [
"solana-sysvar-id",
]
-[[package]]
-name = "solana-rent-collector"
-version = "2.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c1e19f5d5108b0d824244425e43bc78bbb9476e2199e979b0230c9f632d3bf4"
-dependencies = [
- "serde",
- "serde_derive",
- "solana-account",
- "solana-clock",
- "solana-epoch-schedule",
- "solana-frozen-abi",
- "solana-frozen-abi-macro",
- "solana-genesis-config",
- "solana-pubkey",
- "solana-rent",
- "solana-sdk-ids",
-]
-
[[package]]
name = "solana-reward-info"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18205b69139b1ae0ab8f6e11cdcb627328c0814422ad2482000fa2ca54ae4a2f"
+checksum = "82be7946105c2ee6be9f9ee7bd18a068b558389221d29efa92b906476102bfcc"
dependencies = [
"serde",
"serde_derive",
@@ -9902,7 +9997,7 @@ dependencies = [
[[package]]
name = "solana-rpc"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"agave-reserved-account-keys",
@@ -9930,8 +10025,10 @@ dependencies = [
"solana-account-decoder",
"solana-accounts-db",
"solana-address-lookup-table-interface",
+ "solana-cli-output",
"solana-client",
"solana-clock",
+ "solana-cluster-type",
"solana-commitment-config",
"solana-compute-budget-interface",
"solana-entry",
@@ -9947,7 +10044,6 @@ dependencies = [
"solana-instruction",
"solana-keypair",
"solana-ledger",
- "solana-log-collector",
"solana-measure",
"solana-message",
"solana-metrics",
@@ -9980,6 +10076,7 @@ dependencies = [
"solana-storage-bigtable",
"solana-streamer",
"solana-svm",
+ "solana-svm-log-collector",
"solana-system-interface",
"solana-system-transaction",
"solana-sysvar",
@@ -10001,14 +10098,14 @@ dependencies = [
"stream-cancel",
"symlink",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
"tokio-util 0.7.16",
]
[[package]]
name = "solana-rpc-client"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"async-trait",
@@ -10021,9 +10118,9 @@ dependencies = [
"jsonrpc-core",
"jsonrpc-http-server",
"log",
- "reqwest 0.12.22",
+ "reqwest 0.12.23",
"reqwest-middleware",
- "semver 1.0.26",
+ "semver 1.0.27",
"serde",
"serde_derive",
"serde_json",
@@ -10050,16 +10147,17 @@ dependencies = [
"solana-version",
"solana-vote-interface",
"static_assertions",
+ "test-case",
"tokio",
]
[[package]]
name = "solana-rpc-client-api"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"anyhow",
"jsonrpc-core",
- "reqwest 0.12.22",
+ "reqwest 0.12.23",
"reqwest-middleware",
"serde",
"serde_derive",
@@ -10071,12 +10169,12 @@ dependencies = [
"solana-transaction-error",
"solana-transaction-status-client-types",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-rpc-client-nonce-utils"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"anyhow",
"clap 2.33.3",
@@ -10099,23 +10197,24 @@ dependencies = [
"solana-signer",
"solana-system-interface",
"solana-transaction",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
]
[[package]]
name = "solana-rpc-client-types"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"base64 0.22.1",
"bs58",
"const_format",
- "semver 1.0.26",
+ "semver 1.0.27",
"serde",
"serde_derive",
"serde_json",
"solana-account",
"solana-account-decoder-client-types",
+ "solana-address",
"solana-clock",
"solana-commitment-config",
"solana-fee-calculator",
@@ -10125,19 +10224,19 @@ dependencies = [
"solana-transaction-status-client-types",
"solana-version",
"spl-generic-token",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-rpc-test"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bincode",
"bs58",
"crossbeam-channel",
"futures-util",
"log",
- "reqwest 0.12.22",
+ "reqwest 0.12.23",
"serde",
"serde_json",
"solana-account-decoder",
@@ -10168,7 +10267,7 @@ dependencies = [
[[package]]
name = "solana-runtime"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"agave-precompiles",
@@ -10188,7 +10287,7 @@ dependencies = [
"crossbeam-channel",
"dashmap",
"dir-diff",
- "ed25519-dalek",
+ "ed25519-dalek 1.0.1",
"fnv",
"im",
"itertools 0.12.1",
@@ -10196,7 +10295,7 @@ dependencies = [
"libsecp256k1",
"log",
"lz4",
- "memmap2 0.9.7",
+ "memmap2 0.9.8",
"memoffset 0.9.1",
"mockall",
"modular-bitfield",
@@ -10211,6 +10310,7 @@ dependencies = [
"rand_chacha 0.3.1",
"rayon",
"regex",
+ "semver 1.0.27",
"serde",
"serde_derive",
"serde_json",
@@ -10219,11 +10319,13 @@ dependencies = [
"solana-account-info",
"solana-accounts-db",
"solana-address-lookup-table-interface",
+ "solana-bls-signatures",
"solana-bpf-loader-program",
"solana-bucket-map",
"solana-builtins",
"solana-client-traits",
"solana-clock",
+ "solana-cluster-type",
"solana-commitment-config",
"solana-compute-budget",
"solana-compute-budget-instruction",
@@ -10245,6 +10347,7 @@ dependencies = [
"solana-hash",
"solana-inflation",
"solana-instruction",
+ "solana-instruction-error",
"solana-keypair",
"solana-lattice-hash",
"solana-loader-v3-interface",
@@ -10261,11 +10364,11 @@ dependencies = [
"solana-perf",
"solana-poh-config",
"solana-precompile-error",
+ "solana-program-binaries",
"solana-program-runtime",
"solana-pubkey",
"solana-rayon-threadlimit",
"solana-rent",
- "solana-rent-collector",
"solana-reward-info",
"solana-runtime",
"solana-runtime-transaction",
@@ -10282,6 +10385,7 @@ dependencies = [
"solana-stake-program",
"solana-svm",
"solana-svm-callback",
+ "solana-svm-timings",
"solana-svm-transaction",
"solana-system-interface",
"solana-system-program",
@@ -10289,7 +10393,6 @@ dependencies = [
"solana-sysvar",
"solana-sysvar-id",
"solana-time-utils",
- "solana-timings",
"solana-transaction",
"solana-transaction-context",
"solana-transaction-error",
@@ -10307,13 +10410,13 @@ dependencies = [
"tar",
"tempfile",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"zstd",
]
[[package]]
name = "solana-runtime-transaction"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"agave-reserved-account-keys",
@@ -10339,20 +10442,20 @@ dependencies = [
"solana-transaction",
"solana-transaction-error",
"solana-vote-interface",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-sanitize"
-version = "2.2.1"
+version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61f1bc1357b8188d9c4a3af3fc55276e56987265eb7ad073ae6f8180ee54cecf"
+checksum = "dcf09694a0fc14e5ffb18f9b7b7c0f15ecb6eac5b5610bf76a1853459d19daf9"
[[package]]
name = "solana-sbpf"
-version = "0.12.0"
+version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c7a3d3cff34df928b804917bf111d3ede779af406703580cd7ed8fb239f5acf"
+checksum = "0f224d906c14efc7ed7f42bc5fe9588f3f09db8cabe7f6023adda62a69678e1a"
dependencies = [
"byteorder",
"combine 3.8.1",
@@ -10363,89 +10466,85 @@ dependencies = [
"rand 0.8.5",
"rustc-demangle",
"shuttle",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"winapi 0.3.9",
]
[[package]]
name = "solana-sdk-ids"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c5d8b9cc68d5c88b062a33e23a6466722467dde0035152d8fb1afbcdf350a5f"
+checksum = "b1b6d6aaf60669c592838d382266b173881c65fb1cdec83b37cb8ce7cb89f9ad"
dependencies = [
"solana-pubkey",
]
[[package]]
name = "solana-sdk-macro"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86280da8b99d03560f6ab5aca9de2e38805681df34e0bb8f238e69b29433b9df"
+checksum = "d6430000e97083460b71d9fbadc52a2ab2f88f53b3a4c5e58c5ae3640a0e8c00"
dependencies = [
"bs58",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "solana-secp256k1-program"
-version = "2.2.3"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f19833e4bc21558fe9ec61f239553abe7d05224347b57d65c2218aeeb82d6149"
+checksum = "8efa767b0188f577edae7080e8bf080e5db9458e2b6ee5beaa73e2e6bb54e99d"
dependencies = [
"bincode",
"digest 0.10.7",
- "libsecp256k1",
+ "k256",
"serde",
"serde_derive",
"sha3",
- "solana-feature-set",
"solana-instruction",
- "solana-precompile-error",
"solana-sdk-ids",
"solana-signature",
]
[[package]]
name = "solana-secp256k1-recover"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baa3120b6cdaa270f39444f5093a90a7b03d296d362878f7a6991d6de3bbe496"
+checksum = "394a4470477d66296af5217970a905b1c5569032a7732c367fb69e5666c8607e"
dependencies = [
- "libsecp256k1",
- "solana-define-syscall",
- "thiserror 2.0.12",
+ "k256",
+ "solana-define-syscall 3.0.0",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-secp256r1-program"
-version = "2.2.4"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce0ae46da3071a900f02d367d99b2f3058fe2e90c5062ac50c4f20cfedad8f0f"
+checksum = "445d8e12592631d76fc4dc57858bae66c9fd7cc838c306c62a472547fc9d0ce6"
dependencies = [
"bytemuck",
"openssl",
- "solana-feature-set",
"solana-instruction",
- "solana-precompile-error",
"solana-sdk-ids",
]
[[package]]
name = "solana-seed-derivable"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3beb82b5adb266c6ea90e5cf3967235644848eac476c5a1f2f9283a143b7c97f"
+checksum = "ff7bdb72758e3bec33ed0e2658a920f1f35dfb9ed576b951d20d63cb61ecd95c"
dependencies = [
"solana-derivation-path",
]
[[package]]
name = "solana-seed-phrase"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "36187af2324f079f65a675ec22b31c24919cb4ac22c79472e85d819db9bbbc15"
+checksum = "dc905b200a95f2ea9146e43f2a7181e3aeb55de6bc12afb36462d00a3c7310de"
dependencies = [
"hmac 0.12.1",
"pbkdf2 0.11.0",
@@ -10454,7 +10553,7 @@ dependencies = [
[[package]]
name = "solana-send-transaction-service"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"async-trait",
"crossbeam-channel",
@@ -10489,49 +10588,49 @@ dependencies = [
[[package]]
name = "solana-serde"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1931484a408af466e14171556a47adaa215953c7f48b24e5f6b0282763818b04"
+checksum = "709a93cab694c70f40b279d497639788fc2ccbcf9b4aa32273d4b361322c02dd"
dependencies = [
"serde",
]
[[package]]
name = "solana-serde-varint"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a7e155eba458ecfb0107b98236088c3764a09ddf0201ec29e52a0be40857113"
+checksum = "3e5174c57d5ff3c1995f274d17156964664566e2cde18a07bba1586d35a70d3b"
dependencies = [
"serde",
]
[[package]]
name = "solana-serialize-utils"
-version = "2.2.1"
+version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "817a284b63197d2b27afdba829c5ab34231da4a9b4e763466a003c40ca4f535e"
+checksum = "56e41dd8feea239516c623a02f0a81c2367f4b604d7965237fed0751aeec33ed"
dependencies = [
- "solana-instruction",
+ "solana-instruction-error",
"solana-pubkey",
"solana-sanitize",
]
[[package]]
name = "solana-sha256-hasher"
-version = "2.3.0"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aa3feb32c28765f6aa1ce8f3feac30936f16c5c3f7eb73d63a5b8f6f8ecdc44"
+checksum = "a9b912ba6f71cb202c0c3773ec77bf898fa9fe0c78691a2d6859b3b5b8954719"
dependencies = [
"sha2 0.10.9",
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
"solana-hash",
]
[[package]]
name = "solana-short-vec"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c54c66f19b9766a56fa0057d060de8378676cb64987533fa088861858fc5a69"
+checksum = "b69d029da5428fc1c57f7d49101b2077c61f049d4112cd5fb8456567cc7d2638"
dependencies = [
"serde",
"solana-frozen-abi",
@@ -10540,9 +10639,9 @@ dependencies = [
[[package]]
name = "solana-shred-version"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afd3db0461089d1ad1a78d9ba3f15b563899ca2386351d38428faa5350c60a98"
+checksum = "94953e22ca28fe4541a3447d6baeaf519cc4ddc063253bfa673b721f34c136bb"
dependencies = [
"solana-hard-forks",
"solana-hash",
@@ -10551,11 +10650,11 @@ dependencies = [
[[package]]
name = "solana-signature"
-version = "2.3.0"
+version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64c8ec8e657aecfc187522fc67495142c12f35e55ddeca8698edbb738b8dbd8c"
+checksum = "4bb8057cc0e9f7b5e89883d49de6f407df655bb6f3a71d0b7baf9986a2218fd9"
dependencies = [
- "ed25519-dalek",
+ "ed25519-dalek 2.2.0",
"five8",
"rand 0.8.5",
"serde",
@@ -10568,20 +10667,31 @@ dependencies = [
[[package]]
name = "solana-signer"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c41991508a4b02f021c1342ba00bcfa098630b213726ceadc7cb032e051975b"
+checksum = "5bfea97951fee8bae0d6038f39a5efcb6230ecdfe33425ac75196d1a1e3e3235"
dependencies = [
"solana-pubkey",
"solana-signature",
"solana-transaction-error",
]
+[[package]]
+name = "solana-signer-store"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36329bba208f0e41954389ae4ad5d973fe15952672cfd71a9b49deb7d2ecbc2f"
+dependencies = [
+ "bitvec",
+ "num-derive",
+ "num-traits",
+]
+
[[package]]
name = "solana-slot-hashes"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c8691982114513763e88d04094c9caa0376b867a29577939011331134c301ce"
+checksum = "80a293f952293281443c04f4d96afd9d547721923d596e92b4377ed2360f1746"
dependencies = [
"serde",
"serde_derive",
@@ -10592,9 +10702,9 @@ dependencies = [
[[package]]
name = "solana-slot-history"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97ccc1b2067ca22754d5283afb2b0126d61eae734fc616d23871b0943b0d935e"
+checksum = "f914f6b108f5bba14a280b458d023e3621c9973f27f015a4d755b50e88d89e97"
dependencies = [
"bv",
"serde",
@@ -10605,9 +10715,9 @@ dependencies = [
[[package]]
name = "solana-stable-layout"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f14f7d02af8f2bc1b5efeeae71bc1c2b7f0f65cd75bcc7d8180f2c762a57f54"
+checksum = "1da74507795b6e8fb60b7c7306c0c36e2c315805d16eaaf479452661234685ac"
dependencies = [
"solana-instruction",
"solana-pubkey",
@@ -10615,12 +10725,13 @@ dependencies = [
[[package]]
name = "solana-stake-accounts"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"clap 2.33.3",
"solana-account",
"solana-clap-utils",
"solana-cli-config",
+ "solana-cli-output",
"solana-client-traits",
"solana-clock",
"solana-commitment-config",
@@ -10630,6 +10741,7 @@ dependencies = [
"solana-keypair",
"solana-message",
"solana-native-token",
+ "solana-program-binaries",
"solana-pubkey",
"solana-remote-wallet",
"solana-rpc-client",
@@ -10639,97 +10751,57 @@ dependencies = [
"solana-signer",
"solana-stake-interface",
"solana-stake-program",
+ "solana-sysvar",
"solana-transaction",
"solana-version",
]
[[package]]
name = "solana-stake-interface"
-version = "1.2.1"
+version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5269e89fde216b4d7e1d1739cf5303f8398a1ff372a81232abbee80e554a838c"
+checksum = "f6f912ae679b683365348dea482dbd9468d22ff258b554fd36e3d3683c2122e3"
dependencies = [
- "borsh 0.10.3",
- "borsh 1.5.7",
+ "borsh",
"num-traits",
"serde",
"serde_derive",
"solana-clock",
"solana-cpi",
- "solana-decode-error",
"solana-frozen-abi",
"solana-frozen-abi-macro",
"solana-instruction",
"solana-program-error",
"solana-pubkey",
"solana-system-interface",
+ "solana-sysvar",
"solana-sysvar-id",
]
[[package]]
name = "solana-stake-program"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
- "assert_matches",
"bincode",
- "criterion",
"log",
- "proptest",
"solana-account",
- "solana-bincode",
"solana-clock",
- "solana-compute-budget",
- "solana-config-program-client",
- "solana-epoch-rewards",
- "solana-epoch-schedule",
+ "solana-config-interface",
"solana-genesis-config",
- "solana-instruction",
- "solana-log-collector",
"solana-native-token",
- "solana-packet",
- "solana-program-runtime",
"solana-pubkey",
"solana-rent",
"solana-sdk-ids",
"solana-stake-interface",
- "solana-svm-callback",
- "solana-svm-feature-set",
"solana-sysvar",
- "solana-sysvar-id",
"solana-transaction-context",
- "solana-type-overrides",
"solana-vote-interface",
- "solana-vote-program",
- "test-case",
-]
-
-[[package]]
-name = "solana-stake-program-tests"
-version = "3.0.0"
-dependencies = [
- "agave-feature-set",
- "assert_matches",
- "bincode",
- "solana-account",
- "solana-instruction",
- "solana-keypair",
- "solana-program-error",
- "solana-program-test",
- "solana-pubkey",
- "solana-signer",
- "solana-stake-interface",
- "solana-system-interface",
- "solana-sysvar",
- "solana-transaction",
- "solana-transaction-error",
- "solana-vote-program",
- "test-case",
]
[[package]]
name = "solana-storage-bigtable"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-reserved-account-keys",
"backoff",
@@ -10765,7 +10837,7 @@ dependencies = [
"solana-transaction-context",
"solana-transaction-error",
"solana-transaction-status",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
"tonic",
"zstd",
@@ -10773,7 +10845,7 @@ dependencies = [
[[package]]
name = "solana-storage-proto"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bincode",
"bs58",
@@ -10798,19 +10870,20 @@ dependencies = [
[[package]]
name = "solana-streamer"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
+ "anyhow",
"arc-swap",
"assert_matches",
- "async-channel",
"bytes",
+ "clap 4.5.31",
"crossbeam-channel",
"dashmap",
"futures 0.3.31",
"futures-util",
"governor",
"histogram",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itertools 0.12.1",
"libc",
"log",
@@ -10821,7 +10894,7 @@ dependencies = [
"quinn",
"quinn-proto",
"rand 0.8.5",
- "rustls 0.23.31",
+ "rustls 0.23.32",
"smallvec",
"socket2 0.6.0",
"solana-keypair",
@@ -10840,7 +10913,7 @@ dependencies = [
"solana-tls-utils",
"solana-transaction-error",
"solana-transaction-metrics-tracker",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
"tokio-util 0.7.16",
"x509-parser",
@@ -10848,21 +10921,17 @@ dependencies = [
[[package]]
name = "solana-svm"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
- "agave-feature-set",
- "agave-reserved-account-keys",
"agave-syscalls",
"ahash 0.8.11",
"assert_matches",
"bincode",
- "ed25519-dalek",
- "itertools 0.12.1",
+ "ed25519-dalek 1.0.1",
"libsecp256k1",
"log",
"openssl",
"percentage",
- "prost",
"qualifier_attr",
"rand 0.7.3",
"serde",
@@ -10871,7 +10940,7 @@ dependencies = [
"solana-account",
"solana-bpf-loader-program",
"solana-clock",
- "solana-compute-budget-instruction",
+ "solana-compute-budget",
"solana-compute-budget-interface",
"solana-compute-budget-program",
"solana-ed25519-program",
@@ -10887,73 +10956,83 @@ dependencies = [
"solana-loader-v3-interface",
"solana-loader-v4-interface",
"solana-loader-v4-program",
- "solana-log-collector",
"solana-logger",
- "solana-measure",
"solana-message",
"solana-native-token",
"solana-nonce",
"solana-nonce-account",
"solana-precompile-error",
+ "solana-program-binaries",
"solana-program-entrypoint",
"solana-program-pack",
"solana-program-runtime",
"solana-pubkey",
"solana-rent",
- "solana-rent-collector",
"solana-sbpf",
"solana-sdk-ids",
"solana-secp256k1-program",
"solana-secp256r1-program",
"solana-signature",
"solana-signer",
- "solana-slot-hashes",
"solana-svm",
"solana-svm-callback",
- "solana-svm-conformance",
"solana-svm-feature-set",
+ "solana-svm-log-collector",
+ "solana-svm-measure",
+ "solana-svm-timings",
"solana-svm-transaction",
+ "solana-svm-type-overrides",
"solana-system-interface",
"solana-system-program",
"solana-system-transaction",
"solana-sysvar",
"solana-sysvar-id",
- "solana-timings",
"solana-transaction",
"solana-transaction-context",
"solana-transaction-error",
- "solana-type-overrides",
"spl-generic-token",
"spl-token-interface",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-svm-callback"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"solana-account",
+ "solana-clock",
"solana-precompile-error",
"solana-pubkey",
]
[[package]]
-name = "solana-svm-conformance"
-version = "3.0.0"
+name = "solana-svm-feature-set"
+version = "3.1.0"
+
+[[package]]
+name = "solana-svm-log-collector"
+version = "3.1.0"
dependencies = [
- "prost",
- "prost-build",
- "prost-types",
+ "log",
]
[[package]]
-name = "solana-svm-feature-set"
-version = "3.0.0"
+name = "solana-svm-measure"
+version = "3.1.0"
+
+[[package]]
+name = "solana-svm-timings"
+version = "3.1.0"
+dependencies = [
+ "eager",
+ "enum-iterator",
+ "solana-pubkey",
+]
[[package]]
name = "solana-svm-transaction"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"solana-hash",
"solana-message",
@@ -10967,25 +11046,33 @@ dependencies = [
"test-case",
]
+[[package]]
+name = "solana-svm-type-overrides"
+version = "3.1.0"
+dependencies = [
+ "futures 0.3.31",
+ "rand 0.8.5",
+ "shuttle",
+]
+
[[package]]
name = "solana-system-interface"
-version = "1.0.0"
+version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94d7c18cb1a91c6be5f5a8ac9276a1d7c737e39a21beba9ea710ab4b9c63bc90"
+checksum = "4e1790547bfc3061f1ee68ea9d8dc6c973c02a163697b24263a8e9f2e6d4afa2"
dependencies = [
- "js-sys",
"num-traits",
"serde",
"serde_derive",
- "solana-decode-error",
"solana-instruction",
+ "solana-msg",
+ "solana-program-error",
"solana-pubkey",
- "wasm-bindgen",
]
[[package]]
name = "solana-system-program"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"assert_matches",
@@ -11000,7 +11087,6 @@ dependencies = [
"solana-fee-calculator",
"solana-hash",
"solana-instruction",
- "solana-log-collector",
"solana-nonce",
"solana-nonce-account",
"solana-packet",
@@ -11011,17 +11097,18 @@ dependencies = [
"solana-sha256-hasher",
"solana-svm-callback",
"solana-svm-feature-set",
+ "solana-svm-log-collector",
+ "solana-svm-type-overrides",
"solana-system-interface",
"solana-sysvar",
"solana-transaction-context",
- "solana-type-overrides",
]
[[package]]
name = "solana-system-transaction"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5bd98a25e5bcba8b6be8bcbb7b84b24c2a6a8178d7fb0e3077a916855ceba91a"
+checksum = "a31b5699ec533621515e714f1533ee6b3b0e71c463301d919eb59b8c1e249d30"
dependencies = [
"solana-hash",
"solana-keypair",
@@ -11034,9 +11121,9 @@ dependencies = [
[[package]]
name = "solana-sysvar"
-version = "2.2.2"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d50c92bc019c590f5e42c61939676e18d14809ed00b2a59695dd5c67ae72c097"
+checksum = "63205e68d680bcc315337dec311b616ab32fea0a612db3b883ce4de02e0953f9"
dependencies = [
"base64 0.22.1",
"bincode",
@@ -11047,33 +11134,32 @@ dependencies = [
"serde_derive",
"solana-account-info",
"solana-clock",
- "solana-define-syscall",
+ "solana-define-syscall 3.0.0",
"solana-epoch-rewards",
"solana-epoch-schedule",
"solana-fee-calculator",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
"solana-hash",
"solana-instruction",
- "solana-instructions-sysvar",
"solana-last-restart-slot",
"solana-program-entrypoint",
"solana-program-error",
"solana-program-memory",
"solana-pubkey",
"solana-rent",
- "solana-sanitize",
"solana-sdk-ids",
"solana-sdk-macro",
"solana-slot-hashes",
"solana-slot-history",
- "solana-stake-interface",
"solana-sysvar-id",
]
[[package]]
name = "solana-sysvar-id"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5762b273d3325b047cfda250787f8d796d781746860d5d0a746ee29f3e8812c1"
+checksum = "5051bc1a16d5d96a96bc33b5b2ec707495c48fe978097bdaba68d3c47987eb32"
dependencies = [
"solana-pubkey",
"solana-sdk-ids",
@@ -11081,7 +11167,7 @@ dependencies = [
[[package]]
name = "solana-test-validator"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"base64 0.22.1",
@@ -11112,6 +11198,7 @@ dependencies = [
"solana-message",
"solana-native-token",
"solana-net-utils",
+ "solana-program-binaries",
"solana-program-test",
"solana-pubkey",
"solana-rent",
@@ -11125,29 +11212,20 @@ dependencies = [
"solana-tpu-client",
"solana-transaction",
"solana-validator-exit",
- "tokio",
-]
-
-[[package]]
-name = "solana-time-utils"
-version = "2.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6af261afb0e8c39252a04d026e3ea9c405342b08c871a2ad8aa5448e068c784c"
-
-[[package]]
-name = "solana-timings"
-version = "3.0.0"
-dependencies = [
- "eager",
- "enum-iterator",
- "solana-pubkey",
+ "tokio",
]
[[package]]
-name = "solana-tls-utils"
+name = "solana-time-utils"
version = "3.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ced92c60aa76ec4780a9d93f3bd64dfa916e1b998eacc6f1c110f3f444f02c9"
+
+[[package]]
+name = "solana-tls-utils"
+version = "3.1.0"
dependencies = [
- "rustls 0.23.31",
+ "rustls 0.23.32",
"solana-keypair",
"solana-pubkey",
"solana-signer",
@@ -11156,16 +11234,16 @@ dependencies = [
[[package]]
name = "solana-tokens"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"bincode",
"chrono",
"clap 2.33.3",
- "console 0.16.0",
+ "console 0.16.1",
"csv",
"ctrlc",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"indicatif 0.18.0",
"pickledb",
"serde",
@@ -11173,6 +11251,7 @@ dependencies = [
"solana-account-decoder",
"solana-clap-utils",
"solana-cli-config",
+ "solana-cli-output",
"solana-clock",
"solana-commitment-config",
"solana-hash",
@@ -11200,12 +11279,12 @@ dependencies = [
"spl-associated-token-account-interface",
"spl-token-interface",
"tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-tps-client"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"log",
"solana-account",
@@ -11218,6 +11297,7 @@ dependencies = [
"solana-hash",
"solana-keypair",
"solana-message",
+ "solana-net-utils",
"solana-pubkey",
"solana-quic-client",
"solana-rpc-client",
@@ -11231,17 +11311,17 @@ dependencies = [
"solana-transaction-error",
"solana-transaction-status",
"tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-tpu-client"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"async-trait",
"bincode",
"futures-util",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"indicatif 0.18.0",
"log",
"rayon",
@@ -11262,21 +11342,22 @@ dependencies = [
"solana-signer",
"solana-transaction",
"solana-transaction-error",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
]
[[package]]
name = "solana-tpu-client-next"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"async-trait",
"crossbeam-channel",
"futures 0.3.31",
"log",
"lru",
+ "qualifier_attr",
"quinn",
- "rustls 0.23.31",
+ "rustls 0.23.32",
"solana-cli-config",
"solana-clock",
"solana-commitment-config",
@@ -11293,49 +11374,47 @@ dependencies = [
"solana-time-utils",
"solana-tls-utils",
"solana-tpu-client",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
"tokio-util 0.7.16",
+ "tracing",
]
[[package]]
name = "solana-transaction"
-version = "2.2.3"
+version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "80657d6088f721148f5d889c828ca60c7daeedac9a8679f9ec215e0c42bcbf41"
+checksum = "64928e6af3058dcddd6da6680cbe08324b4e071ad73115738235bbaa9e9f72a5"
dependencies = [
"bincode",
"serde",
"serde_derive",
- "solana-bincode",
- "solana-feature-set",
+ "solana-address",
"solana-hash",
"solana-instruction",
- "solana-keypair",
+ "solana-instruction-error",
"solana-message",
- "solana-precompiles",
- "solana-pubkey",
"solana-sanitize",
"solana-sdk-ids",
"solana-short-vec",
"solana-signature",
"solana-signer",
- "solana-system-interface",
"solana-transaction-error",
- "wasm-bindgen",
]
[[package]]
name = "solana-transaction-context"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bincode",
+ "qualifier_attr",
"serde",
"serde_derive",
"solana-account",
"solana-account-info",
"solana-instruction",
"solana-instructions-sysvar",
+ "solana-program-entrypoint",
"solana-pubkey",
"solana-rent",
"solana-sbpf",
@@ -11348,7 +11427,7 @@ dependencies = [
[[package]]
name = "solana-transaction-dos"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bincode",
"clap 2.33.3",
@@ -11385,21 +11464,21 @@ dependencies = [
[[package]]
name = "solana-transaction-error"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "222a9dc8fdb61c6088baab34fc3a8b8473a03a7a5fd404ed8dd502fa79b67cb1"
+checksum = "4222065402340d7e6aec9dc3e54d22992ddcf923d91edcd815443c2bfca3144a"
dependencies = [
"serde",
"serde_derive",
"solana-frozen-abi",
"solana-frozen-abi-macro",
- "solana-instruction",
+ "solana-instruction-error",
"solana-sanitize",
]
[[package]]
name = "solana-transaction-metrics-tracker"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"base64 0.22.1",
"bincode",
@@ -11417,14 +11496,14 @@ dependencies = [
[[package]]
name = "solana-transaction-status"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"Inflector",
"agave-reserved-account-keys",
"base64 0.22.1",
"bencher",
"bincode",
- "borsh 1.5.7",
+ "borsh",
"bs58",
"bytemuck",
"log",
@@ -11457,12 +11536,12 @@ dependencies = [
"spl-token-group-interface",
"spl-token-interface",
"spl-token-metadata-interface",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-transaction-status-client-types"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"base64 0.22.1",
"bincode",
@@ -11481,12 +11560,12 @@ dependencies = [
"solana-transaction-context",
"solana-transaction-error",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-turbine"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"agave-xdp",
@@ -11496,6 +11575,7 @@ dependencies = [
"bs58",
"bytes",
"caps",
+ "conditional-mod",
"crossbeam-channel",
"futures 0.3.31",
"itertools 0.12.1",
@@ -11506,7 +11586,7 @@ dependencies = [
"rand 0.8.5",
"rand_chacha 0.3.1",
"rayon",
- "rustls 0.23.31",
+ "rustls 0.23.32",
"solana-clock",
"solana-cluster-type",
"solana-entry",
@@ -11520,6 +11600,7 @@ dependencies = [
"solana-metrics",
"solana-native-token",
"solana-net-utils",
+ "solana-nohash-hasher",
"solana-perf",
"solana-poh",
"solana-pubkey",
@@ -11536,24 +11617,16 @@ dependencies = [
"solana-tls-utils",
"solana-transaction",
"solana-transaction-error",
+ "solana-turbine",
"static_assertions",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
]
-[[package]]
-name = "solana-type-overrides"
-version = "3.0.0"
-dependencies = [
- "futures 0.3.31",
- "rand 0.8.5",
- "shuttle",
-]
-
[[package]]
name = "solana-udp-client"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"async-trait",
"solana-connection-cache",
@@ -11562,13 +11635,13 @@ dependencies = [
"solana-packet",
"solana-streamer",
"solana-transaction-error",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
]
[[package]]
name = "solana-unified-scheduler-logic"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"assert_matches",
"solana-instruction",
@@ -11582,7 +11655,7 @@ dependencies = [
[[package]]
name = "solana-unified-scheduler-pool"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-banking-stage-ingress-types",
"aquamarine",
@@ -11608,8 +11681,8 @@ dependencies = [
"solana-runtime",
"solana-runtime-transaction",
"solana-svm",
+ "solana-svm-timings",
"solana-system-transaction",
- "solana-timings",
"solana-transaction",
"solana-transaction-error",
"solana-unified-scheduler-logic",
@@ -11620,27 +11693,19 @@ dependencies = [
"vec_extract_if_polyfill",
]
-[[package]]
-name = "solana-upload-perf"
-version = "3.0.0"
-dependencies = [
- "serde_json",
- "solana-metrics",
-]
-
[[package]]
name = "solana-validator-exit"
-version = "2.2.1"
+version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7bbf6d7a3c0b28dd5335c52c0e9eae49d0ae489a8f324917faf0ded65a812c1d"
+checksum = "c5d2face763df5afeaa9509b9019968860e69cc1531ec8b4a2e6c7b702204d5a"
[[package]]
name = "solana-version"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"rand 0.8.5",
- "semver 1.0.26",
+ "semver 1.0.27",
"serde",
"serde_derive",
"solana-frozen-abi",
@@ -11651,11 +11716,10 @@ dependencies = [
[[package]]
name = "solana-vortexor"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-banking-stage-ingress-types",
"assert_matches",
- "async-channel",
"bytes",
"clap 4.5.31",
"crossbeam-channel",
@@ -11664,7 +11728,7 @@ dependencies = [
"futures-util",
"governor",
"histogram",
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"itertools 0.12.1",
"libc",
"log",
@@ -11674,7 +11738,7 @@ dependencies = [
"quinn",
"quinn-proto",
"rand 0.8.5",
- "rustls 0.23.31",
+ "rustls 0.23.32",
"signal-hook",
"smallvec",
"socket2 0.6.0",
@@ -11697,15 +11761,16 @@ dependencies = [
"solana-streamer",
"solana-transaction-metrics-tracker",
"solana-version",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tokio",
- "url 2.5.4",
+ "tokio-util 0.7.16",
+ "url 2.5.7",
"x509-parser",
]
[[package]]
name = "solana-vote"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"arbitrary",
"bencher",
@@ -11735,27 +11800,29 @@ dependencies = [
"solana-transaction",
"solana-vote-interface",
"static_assertions",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "solana-vote-interface"
-version = "2.2.6"
+version = "4.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b80d57478d6599d30acc31cc5ae7f93ec2361a06aefe8ea79bc81739a08af4c3"
+checksum = "c33f1a30b1e61944e52afef0992a2be93720c5770eaf1f6d8e6e34f87d90e754"
dependencies = [
"arbitrary",
"bincode",
+ "cfg_eval",
"num-derive",
"num-traits",
"serde",
"serde_derive",
+ "serde_with",
"solana-clock",
- "solana-decode-error",
"solana-frozen-abi",
"solana-frozen-abi-macro",
"solana-hash",
"solana-instruction",
+ "solana-instruction-error",
"solana-pubkey",
"solana-rent",
"solana-sdk-ids",
@@ -11767,7 +11834,7 @@ dependencies = [
[[package]]
name = "solana-vote-program"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"assert_matches",
@@ -11802,12 +11869,25 @@ dependencies = [
"solana-transaction-context",
"solana-vote-interface",
"test-case",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
+]
+
+[[package]]
+name = "solana-votor-messages"
+version = "3.1.0"
+dependencies = [
+ "serde",
+ "solana-bls-signatures",
+ "solana-clock",
+ "solana-frozen-abi",
+ "solana-frozen-abi-macro",
+ "solana-hash",
+ "solana-logger",
]
[[package]]
name = "solana-wen-restart"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"anyhow",
"assert_matches",
@@ -11833,8 +11913,8 @@ dependencies = [
"solana-shred-version",
"solana-signer",
"solana-streamer",
+ "solana-svm-timings",
"solana-time-utils",
- "solana-timings",
"solana-vote",
"solana-vote-interface",
"solana-vote-program",
@@ -11843,7 +11923,7 @@ dependencies = [
[[package]]
name = "solana-zk-elgamal-proof-program"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"bytemuck",
@@ -11852,15 +11932,15 @@ dependencies = [
"num-derive",
"num-traits",
"solana-instruction",
- "solana-log-collector",
"solana-program-runtime",
"solana-sdk-ids",
- "solana-zk-sdk 3.0.0",
+ "solana-svm-log-collector",
+ "solana-zk-sdk",
]
[[package]]
name = "solana-zk-elgamal-proof-program-tests"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"bytemuck",
"solana-account",
@@ -11874,69 +11954,14 @@ dependencies = [
"solana-system-interface",
"solana-transaction",
"solana-transaction-error",
- "solana-zk-sdk 3.0.0",
-]
-
-[[package]]
-name = "solana-zk-keygen"
-version = "3.0.0"
-dependencies = [
- "bs58",
- "clap 3.2.23",
- "dirs-next",
- "solana-clap-v3-utils",
- "solana-pubkey",
- "solana-remote-wallet",
- "solana-seed-derivable",
- "solana-signer",
- "solana-version",
- "solana-zk-token-sdk",
- "tempfile",
- "thiserror 2.0.12",
- "tiny-bip39",
-]
-
-[[package]]
-name = "solana-zk-sdk"
-version = "2.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05857892ac50fe03c125d8445fd790c6768015b76f4ad1e4b4b1499938b357f0"
-dependencies = [
- "aes-gcm-siv",
- "base64 0.22.1",
- "bincode",
- "bytemuck",
- "bytemuck_derive",
- "curve25519-dalek 4.1.3",
- "itertools 0.12.1",
- "js-sys",
- "merlin",
- "num-derive",
- "num-traits",
- "rand 0.8.5",
- "serde",
- "serde_derive",
- "serde_json",
- "sha3",
- "solana-derivation-path",
- "solana-instruction",
- "solana-pubkey",
- "solana-sdk-ids",
- "solana-seed-derivable",
- "solana-seed-phrase",
- "solana-signature",
- "solana-signer",
- "subtle",
- "thiserror 2.0.12",
- "wasm-bindgen",
- "zeroize",
+ "solana-zk-sdk",
]
[[package]]
name = "solana-zk-sdk"
-version = "3.0.0"
+version = "4.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dffbd0b7537f4249d69b74c632f8eac1d2726572022791f9ead65a67d3f6905"
+checksum = "9602bcb1f7af15caef92b91132ec2347e1c51a72ecdbefdaefa3eac4b8711475"
dependencies = [
"aes-gcm-siv",
"base64 0.22.1",
@@ -11944,6 +11969,7 @@ dependencies = [
"bytemuck",
"bytemuck_derive",
"curve25519-dalek 4.1.3",
+ "getrandom 0.2.15",
"itertools 0.12.1",
"js-sys",
"merlin",
@@ -11963,14 +11989,14 @@ dependencies = [
"solana-signature",
"solana-signer",
"subtle",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"wasm-bindgen",
"zeroize",
]
[[package]]
name = "solana-zk-token-proof-program"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"agave-feature-set",
"bytemuck",
@@ -11979,15 +12005,15 @@ dependencies = [
"num-derive",
"num-traits",
"solana-instruction",
- "solana-log-collector",
"solana-program-runtime",
"solana-sdk-ids",
+ "solana-svm-log-collector",
"solana-zk-token-sdk",
]
[[package]]
name = "solana-zk-token-sdk"
-version = "3.0.0"
+version = "3.1.0"
dependencies = [
"aes-gcm-siv",
"base64 0.22.1",
@@ -12004,7 +12030,7 @@ dependencies = [
"serde_derive",
"serde_json",
"sha3",
- "solana-curve25519 3.0.0",
+ "solana-curve25519 3.1.0",
"solana-derivation-path",
"solana-instruction",
"solana-keypair",
@@ -12015,7 +12041,7 @@ dependencies = [
"solana-signature",
"solana-signer",
"subtle",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
"tiny-bip39",
"zeroize",
]
@@ -12035,22 +12061,32 @@ dependencies = [
"lock_api",
]
+[[package]]
+name = "spki"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d"
+dependencies = [
+ "base64ct",
+ "der",
+]
+
[[package]]
name = "spl-associated-token-account-interface"
-version = "1.0.0"
+version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e6bbe0794e532ac08428d3abf5bf8ae75bd81dfddd785c388e326c00c92c6f5"
+checksum = "e6433917b60441d68d99a17e121d9db0ea15a9a69c0e5afa34649cf5ba12612f"
dependencies = [
- "borsh 1.5.7",
+ "borsh",
"solana-instruction",
"solana-pubkey",
]
[[package]]
name = "spl-discriminator"
-version = "0.4.0"
+version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a20542d4c8264856d205c0090512f374dbf7b3124479a3d93ab6184ae3631aa"
+checksum = "d48cc11459e265d5b501534144266620289720b4c44522a47bc6b63cd295d2f3"
dependencies = [
"bytemuck",
"solana-program-error",
@@ -12066,7 +12102,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750"
dependencies = [
"quote",
"spl-discriminator-syn",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12078,15 +12114,15 @@ dependencies = [
"proc-macro2",
"quote",
"sha2 0.10.9",
- "syn 2.0.104",
+ "syn 2.0.106",
"thiserror 1.0.69",
]
[[package]]
name = "spl-generic-token"
-version = "1.0.1"
+version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "741a62a566d97c58d33f9ed32337ceedd4e35109a686e31b1866c5dfa56abddc"
+checksum = "233df81b75ab99b42f002b5cdd6e65a7505ffa930624f7096a7580a56765e9cf"
dependencies = [
"bytemuck",
"solana-pubkey",
@@ -12094,9 +12130,9 @@ dependencies = [
[[package]]
name = "spl-instruction-padding-interface"
-version = "0.1.0"
+version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f738b75144edbb32c01de832632eecad71113b62a48ef8e55e60c5a692bae4e"
+checksum = "9c3a77c0c9b83b111ee29bc6aa6eaab54b82e1ed5db40ba9786527b80283c3ef"
dependencies = [
"num_enum",
"solana-instruction",
@@ -12106,9 +12142,9 @@ dependencies = [
[[package]]
name = "spl-memo-interface"
-version = "1.0.0"
+version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24af0730130fea732616be9425fe8eb77782e2aab2f0e76837b6a66aaba96c6b"
+checksum = "3d4e2aedd58f858337fa609af5ad7100d4a243fdaf6a40d6eb4c28c5f19505d3"
dependencies = [
"solana-instruction",
"solana-pubkey",
@@ -12116,29 +12152,28 @@ dependencies = [
[[package]]
name = "spl-pod"
-version = "0.5.1"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d994afaf86b779104b4a95ba9ca75b8ced3fdb17ee934e38cb69e72afbe17799"
+checksum = "b1233fdecd7461611d69bb87bc2e95af742df47291975d21232a0be8217da9de"
dependencies = [
- "borsh 1.5.7",
+ "borsh",
"bytemuck",
"bytemuck_derive",
"num-derive",
"num-traits",
- "solana-decode-error",
- "solana-msg",
+ "num_enum",
"solana-program-error",
"solana-program-option",
"solana-pubkey",
- "solana-zk-sdk 2.3.6",
- "thiserror 2.0.12",
+ "solana-zk-sdk",
+ "thiserror 2.0.16",
]
[[package]]
name = "spl-token-2022-interface"
-version = "1.0.0"
+version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62d7ae2ee6b856f8ddcbdc3b3a9f4d2141582bbe150f93e5298ee97e0251fa04"
+checksum = "0888304af6b3d839e435712e6c84025e09513017425ff62045b6b8c41feb77d9"
dependencies = [
"arrayref",
"bytemuck",
@@ -12146,79 +12181,76 @@ dependencies = [
"num-traits",
"num_enum",
"solana-account-info",
- "solana-decode-error",
"solana-instruction",
- "solana-msg",
"solana-program-error",
"solana-program-option",
"solana-program-pack",
"solana-pubkey",
"solana-sdk-ids",
- "solana-zk-sdk 2.3.6",
+ "solana-zk-sdk",
"spl-pod",
"spl-token-confidential-transfer-proof-extraction",
"spl-token-confidential-transfer-proof-generation",
"spl-token-group-interface",
"spl-token-metadata-interface",
"spl-type-length-value",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "spl-token-confidential-transfer-proof-extraction"
-version = "0.4.0"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bedc4675c80409a004da46978674e4073c65c4b1c611bf33d120381edeffe036"
+checksum = "7a22217af69b7a61ca813f47c018afb0b00b02a74a4c70ff099cd4287740bc3d"
dependencies = [
"bytemuck",
"solana-account-info",
- "solana-curve25519 2.2.15",
+ "solana-curve25519 2.3.7",
"solana-instruction",
"solana-instructions-sysvar",
"solana-msg",
"solana-program-error",
"solana-pubkey",
"solana-sdk-ids",
- "solana-zk-sdk 2.3.6",
+ "solana-zk-sdk",
"spl-pod",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "spl-token-confidential-transfer-proof-generation"
-version = "0.4.0"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae5b124840d4aed474cef101d946a798b806b46a509ee4df91021e1ab1cef3ef"
+checksum = "f63a2b41095945dc15274b924b21ccae9b3ec9dc2fdd43dbc08de8c33bbcd915"
dependencies = [
"curve25519-dalek 4.1.3",
- "solana-zk-sdk 2.3.6",
- "thiserror 2.0.12",
+ "solana-zk-sdk",
+ "thiserror 2.0.16",
]
[[package]]
name = "spl-token-group-interface"
-version = "0.6.0"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5597b4cd76f85ce7cd206045b7dc22da8c25516573d42d267c8d1fd128db5129"
+checksum = "452d0f758af20caaa10d9a6f7608232e000d4c74462f248540b3d2ddfa419776"
dependencies = [
"bytemuck",
"num-derive",
"num-traits",
- "solana-decode-error",
+ "num_enum",
"solana-instruction",
- "solana-msg",
"solana-program-error",
"solana-pubkey",
"spl-discriminator",
"spl-pod",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "spl-token-interface"
-version = "1.0.0"
+version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06e0c2d4e38ef5834cf7fb1b592b8a8c6eab8485f5ac7a04a151b502c63a0aaa"
+checksum = "8c564ac05a7c8d8b12e988a37d82695b5ba4db376d07ea98bc4882c81f96c7f3"
dependencies = [
"arrayref",
"bytemuck",
@@ -12231,46 +12263,44 @@ dependencies = [
"solana-program-pack",
"solana-pubkey",
"solana-sdk-ids",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "spl-token-metadata-interface"
-version = "0.7.0"
+version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "304d6e06f0de0c13a621464b1fd5d4b1bebf60d15ca71a44d3839958e0da16ee"
+checksum = "9c467c7c3bd056f8fe60119e7ec34ddd6f23052c2fa8f1f51999098063b72676"
dependencies = [
- "borsh 1.5.7",
+ "borsh",
"num-derive",
"num-traits",
"solana-borsh",
- "solana-decode-error",
"solana-instruction",
- "solana-msg",
"solana-program-error",
"solana-pubkey",
"spl-discriminator",
"spl-pod",
"spl-type-length-value",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
name = "spl-type-length-value"
-version = "0.8.0"
+version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d417eb548214fa822d93f84444024b4e57c13ed6719d4dcc68eec24fb481e9f5"
+checksum = "ca20a1a19f4507a98ca4b28ff5ed54cac9b9d34ed27863e2bde50a3238f9a6ac"
dependencies = [
"bytemuck",
"num-derive",
"num-traits",
+ "num_enum",
"solana-account-info",
- "solana-decode-error",
"solana-msg",
"solana-program-error",
"spl-discriminator",
"spl-pod",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
]
[[package]]
@@ -12361,9 +12391,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.104"
+version = "2.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
+checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
dependencies = [
"proc-macro2",
"quote",
@@ -12405,7 +12435,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12520,15 +12550,15 @@ dependencies = [
[[package]]
name = "tempfile"
-version = "3.20.0"
+version = "3.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1"
+checksum = "84fa4d11fadde498443cca10fd3ac23c951f0dc59e080e9f4b93d4df4e4eea53"
dependencies = [
"fastrand",
"getrandom 0.3.3",
"once_cell",
"rustix 1.0.2",
- "windows-sys 0.59.0",
+ "windows-sys 0.61.0",
]
[[package]]
@@ -12561,11 +12591,11 @@ version = "3.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54c25e2cb8f5fcd7318157634e8838aa6f7e4715c96637f969fabaccd1ef5462"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12577,7 +12607,7 @@ dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"test-case-core",
]
@@ -12607,11 +12637,11 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "2.0.12"
+version = "2.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
+checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0"
dependencies = [
- "thiserror-impl 2.0.12",
+ "thiserror-impl 2.0.16",
]
[[package]]
@@ -12622,18 +12652,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "thiserror-impl"
-version = "2.0.12"
+version = "2.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
+checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12642,8 +12672,8 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfe075d7053dae61ac5413a34ea7d4913b6e6207844fd726bdd858b37ff72bf5"
dependencies = [
- "bitflags 2.9.1",
- "cfg-if 1.0.1",
+ "bitflags 2.9.4",
+ "cfg-if 1.0.3",
"libc",
"log",
"rustversion",
@@ -12665,6 +12695,15 @@ dependencies = [
"once_cell",
]
+[[package]]
+name = "threadpool"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
+dependencies = [
+ "num_cpus",
+]
+
[[package]]
name = "tikv-jemalloc-sys"
version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7"
@@ -12795,7 +12834,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -12824,7 +12863,7 @@ version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b"
dependencies = [
- "rustls 0.23.31",
+ "rustls 0.23.32",
"tokio",
]
@@ -12935,7 +12974,7 @@ version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1"
dependencies = [
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"toml_datetime",
"winnow 0.5.16",
]
@@ -12946,7 +12985,7 @@ version = "0.22.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef"
dependencies = [
- "indexmap 2.10.0",
+ "indexmap 2.11.4",
"serde",
"serde_spanned",
"toml_datetime",
@@ -12971,7 +13010,7 @@ dependencies = [
"http-body 0.4.5",
"hyper 0.14.32",
"hyper-timeout",
- "percent-encoding 2.3.1",
+ "percent-encoding 2.3.2",
"pin-project",
"prost",
"rustls-pemfile",
@@ -13039,7 +13078,7 @@ version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
dependencies = [
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
"bytes",
"futures-util",
"http 1.1.0",
@@ -13065,9 +13104,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
[[package]]
name = "tracing"
-version = "0.1.40"
+version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
+checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
"log",
"pin-project-lite",
@@ -13077,20 +13116,20 @@ dependencies = [
[[package]]
name = "tracing-attributes"
-version = "0.1.27"
+version = "0.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
+checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
name = "tracing-core"
-version = "0.1.32"
+version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
+checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
dependencies = [
"once_cell",
"valuable",
@@ -13110,9 +13149,9 @@ dependencies = [
[[package]]
name = "tracing-subscriber"
-version = "0.3.7"
+version = "0.3.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5312f325fe3588e277415f5a6cca1f4ccad0f248c4cd5a4bd33032d7286abc22"
+checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5"
dependencies = [
"sharded-slab",
"thread_local",
@@ -13158,7 +13197,7 @@ dependencies = [
"rustls 0.21.12",
"sha1",
"thiserror 1.0.69",
- "url 2.5.4",
+ "url 2.5.7",
"utf-8",
"webpki-roots 0.24.0",
]
@@ -13301,13 +13340,14 @@ dependencies = [
[[package]]
name = "url"
-version = "2.5.4"
+version = "2.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
+checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b"
dependencies = [
"form_urlencoded",
- "idna 1.0.3",
- "percent-encoding 2.3.1",
+ "idna 1.1.0",
+ "percent-encoding 2.3.2",
+ "serde",
]
[[package]]
@@ -13322,12 +13362,6 @@ version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246"
-[[package]]
-name = "utf8-width"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cf7d77f457ef8dfa11e4cd5933c5ddb5dc52a94664071951219a97710f0a32b"
-
[[package]]
name = "utf8_iter"
version = "1.0.4"
@@ -13429,27 +13463,28 @@ dependencies = [
[[package]]
name = "wasm-bindgen"
-version = "0.2.100"
+version = "0.2.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
+checksum = "ab10a69fbd0a177f5f649ad4d8d3305499c42bab9aef2f7ff592d0ec8f833819"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
+ "wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.100"
+version = "0.2.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
+checksum = "0bb702423545a6007bbc368fde243ba47ca275e549c8a28617f56f6ba53b1d1c"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"wasm-bindgen-shared",
]
@@ -13459,7 +13494,7 @@ version = "0.4.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"js-sys",
"wasm-bindgen",
"web-sys",
@@ -13467,9 +13502,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.100"
+version = "0.2.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
+checksum = "fc65f4f411d91494355917b605e1480033152658d71f722a90647f56a70c88a0"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -13477,22 +13512,22 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.100"
+version = "0.2.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
+checksum = "ffc003a991398a8ee604a401e194b6b3a39677b3173d6e74495eb51b82e99a32"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.100"
+version = "0.2.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
+checksum = "293c37f4efa430ca14db3721dfbe48d8c33308096bd44d80ebaa775ab71ba1cf"
dependencies = [
"unicode-ident",
]
@@ -13519,9 +13554,9 @@ dependencies = [
[[package]]
name = "webpki-root-certs"
-version = "0.26.6"
+version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8c6dfa3ac045bc517de14c7b1384298de1dbd229d38e08e169d9ae8c170937c"
+checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a"
dependencies = [
"rustls-pki-types",
]
@@ -13570,16 +13605,6 @@ dependencies = [
"libc",
]
-[[package]]
-name = "wide"
-version = "0.7.32"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22"
-dependencies = [
- "bytemuck",
- "safe_arch",
-]
-
[[package]]
name = "winapi"
version = "0.2.8"
@@ -13645,9 +13670,9 @@ dependencies = [
[[package]]
name = "windows-link"
-version = "0.1.0"
+version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3"
+checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65"
[[package]]
name = "windows-result"
@@ -13696,11 +13721,11 @@ dependencies = [
[[package]]
name = "windows-sys"
-version = "0.60.2"
+version = "0.61.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
+checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa"
dependencies = [
- "windows-targets 0.53.2",
+ "windows-link",
]
[[package]]
@@ -13742,29 +13767,13 @@ dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
- "windows_i686_gnullvm 0.52.6",
+ "windows_i686_gnullvm",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
]
-[[package]]
-name = "windows-targets"
-version = "0.53.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef"
-dependencies = [
- "windows_aarch64_gnullvm 0.53.0",
- "windows_aarch64_msvc 0.53.0",
- "windows_i686_gnu 0.53.0",
- "windows_i686_gnullvm 0.53.0",
- "windows_i686_msvc 0.53.0",
- "windows_x86_64_gnu 0.53.0",
- "windows_x86_64_gnullvm 0.53.0",
- "windows_x86_64_msvc 0.53.0",
-]
-
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
@@ -13783,12 +13792,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
-
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
@@ -13807,12 +13810,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
-
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
@@ -13831,24 +13828,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
-[[package]]
-name = "windows_i686_gnu"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
-
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
-[[package]]
-name = "windows_i686_gnullvm"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
-
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
@@ -13867,12 +13852,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
-[[package]]
-name = "windows_i686_msvc"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
-
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
@@ -13891,12 +13870,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
-
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
@@ -13915,12 +13888,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
-
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
@@ -13939,12 +13906,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
-
[[package]]
name = "winnow"
version = "0.5.16"
@@ -13969,7 +13930,7 @@ version = "0.50.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
dependencies = [
- "cfg-if 1.0.1",
+ "cfg-if 1.0.3",
"windows-sys 0.48.0",
]
@@ -13979,7 +13940,7 @@ version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
- "bitflags 2.9.1",
+ "bitflags 2.9.4",
]
[[package]]
@@ -14067,7 +14028,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"synstructure 0.13.1",
]
@@ -14097,7 +14058,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -14108,7 +14069,7 @@ checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -14128,7 +14089,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
"synstructure 0.13.1",
]
@@ -14149,7 +14110,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
@@ -14171,7 +14132,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.104",
+ "syn 2.0.106",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 9079b324e1e4a0..4ae6690152f2e2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -50,13 +50,11 @@ members = [
"ledger",
"ledger-tool",
"local-cluster",
- "log-analyzer",
- "log-collector",
+ "low-pass-filter",
"measure",
"memory-management",
"merkle-tree",
"metrics",
- "net-shaper",
"net-utils",
"notifier",
"perf",
@@ -67,6 +65,7 @@ members = [
"poh-bench",
"poseidon",
"precompiles",
+ "program-binaries",
"program-runtime",
"program-test",
"programs/bpf-loader-tests",
@@ -76,7 +75,6 @@ members = [
"programs/ed25519-tests",
"programs/loader-v4",
"programs/stake",
- "programs/stake-tests",
"programs/system",
"programs/vote",
"programs/zk-elgamal-proof",
@@ -96,6 +94,7 @@ members = [
"rpc-test",
"runtime",
"runtime-transaction",
+ "scheduler-bindings",
"send-transaction-service",
"stake-accounts",
"storage-bigtable",
@@ -104,14 +103,16 @@ members = [
"streamer",
"svm",
"svm-callback",
- "svm-conformance",
"svm-feature-set",
+ "svm-log-collector",
+ "svm-measure",
+ "svm-timings",
"svm-transaction",
+ "svm-type-overrides",
"syscalls",
"syscalls/gen-syscall-list",
"test-validator",
"thread-manager",
- "timings",
"tls-utils",
"tokens",
"tps-client",
@@ -124,30 +125,30 @@ members = [
"transaction-status-client-types",
"transaction-view",
"turbine",
- "type-overrides",
"udp-client",
"unified-scheduler-logic",
"unified-scheduler-pool",
- "upload-perf",
"validator",
"verified-packet-receiver",
"version",
"vortexor",
"vote",
+ "votor",
+ "votor-messages",
"watchtower",
"wen-restart",
"xdp",
- "zk-keygen",
"zk-token-sdk",
]
-exclude = ["programs/sbf", "svm/examples", "svm/tests/example-programs"]
+exclude = ["programs/sbf", "svm/tests/example-programs"]
resolver = "2"
[workspace.package]
-version = "3.0.0"
+version = "3.1.0"
authors = ["Anza Maintainers "]
+description = "Blockchain, Rebuilt for Scale"
repository = "https://github.com/anza-xyz/agave"
homepage = "https://anza.xyz/"
license = "Apache-2.0"
@@ -173,22 +174,25 @@ used_underscore_binding = "deny"
[workspace.dependencies]
Inflector = "0.11.4"
aes-gcm-siv = "0.11.1"
-agave-banking-stage-ingress-types = { path = "banking-stage-ingress-types", version = "=3.0.0" }
-agave-cargo-registry = { path = "cargo-registry", version = "=3.0.0" }
-agave-feature-set = { path = "feature-set", version = "=3.0.0" }
-agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=3.0.0" }
-agave-io-uring = { path = "io-uring", version = "=3.0.0" }
-agave-precompiles = { path = "precompiles", version = "=3.0.0" }
-agave-reserved-account-keys = { path = "reserved-account-keys", version = "=3.0.0" }
-agave-syscalls = { path = "syscalls", version = "=3.0.0" }
-agave-thread-manager = { path = "thread-manager", version = "=3.0.0" }
-agave-transaction-view = { path = "transaction-view", version = "=3.0.0" }
-agave-verified-packet-receiver = { path = "verified-packet-receiver", version = "=3.0.0" }
-agave-xdp = { path = "xdp", version = "=3.0.0" }
+agave-banking-stage-ingress-types = { path = "banking-stage-ingress-types", version = "=3.1.0" }
+agave-cargo-registry = { path = "cargo-registry", version = "=3.1.0" }
+agave-feature-set = { path = "feature-set", version = "=3.1.0" }
+agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "=3.1.0" }
+agave-io-uring = { path = "io-uring", version = "=3.1.0" }
+agave-low-pass-filter = { path = "low-pass-filter", version = "=3.1.0" }
+agave-precompiles = { path = "precompiles", version = "=3.1.0" }
+agave-reserved-account-keys = { path = "reserved-account-keys", version = "=3.1.0" }
+agave-scheduler-bindings = { path = "scheduler-bindings", version = "=3.1.0" }
+agave-syscalls = { path = "syscalls", version = "=3.1.0" }
+agave-thread-manager = { path = "thread-manager", version = "=3.1.0" }
+agave-transaction-view = { path = "transaction-view", version = "=3.1.0" }
+agave-verified-packet-receiver = { path = "verified-packet-receiver", version = "=3.1.0" }
+agave-votor = { path = "votor", version = "=3.1.0" }
+agave-xdp = { path = "xdp", version = "=3.1.0" }
ahash = "0.8.11"
-anyhow = "1.0.98"
+anyhow = "1.0.100"
aquamarine = "0.6.0"
-arbitrary = "1.4.1"
+arbitrary = "1.4.2"
arc-swap = "1.7.1"
ark-bn254 = "0.4.0"
ark-ec = "0.4.0"
@@ -199,9 +203,8 @@ arrayref = "0.3.9"
arrayvec = "0.7.6"
assert_cmd = "2.0"
assert_matches = "1.5.0"
-async-channel = "1.9.0"
async-lock = "3.4.1"
-async-trait = "0.1.88"
+async-trait = "0.1.89"
atty = "0.2.11"
axum = "0.7.9"
aya = "0.13"
@@ -209,27 +212,27 @@ backoff = "0.4.0"
base64 = "0.22.1"
bencher = "0.1.5"
bincode = "1.3.3"
-bitflags = { version = "2.9.1" }
+bitflags = { version = "2.9.4" }
+bitvec = { version = "1.0.1", features = ["serde"] }
blake3 = "1.8.2"
borsh = { version = "1.5.7", features = ["derive", "unstable__schema"] }
-borsh0-10 = { package = "borsh", version = "0.10.3" }
bs58 = { version = "0.5.1", default-features = false }
bv = "0.11.1"
byte-unit = "4.0.19"
-bytemuck = "1.23.1"
-bytemuck_derive = "1.10.0"
+bytemuck = "1.23.2"
+bytemuck_derive = "1.10.1"
bytes = "1.10"
bzip2 = "0.4.4"
caps = "0.5.5"
cargo_metadata = "0.15.4"
-cfg-if = "1.0.1"
+cfg-if = "1.0.3"
cfg_eval = "0.1.2"
-chrono = { version = "0.4.41", default-features = false }
+chrono = { version = "0.4.42", default-features = false }
chrono-humanize = "0.2.3"
clap = "2.33.1"
# Remove this dependency when procedural macros will support non-inline modules.
conditional-mod = "0.1.0"
-console = "0.16.0"
+console = "0.16.1"
console_error_panic_hook = "0.1.7"
console_log = "0.2.2"
const_format = "0.2.34"
@@ -238,11 +241,11 @@ criterion = "0.5.1"
criterion-stats = "0.3.0"
crossbeam-channel = "0.5.15"
csv = "1.3.1"
-ctrlc = "3.4.7"
+ctrlc = "3.5.0"
curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core"] }
dashmap = "5.5.3"
derivation-path = { version = "0.2.0", default-features = false }
-derive-where = "1.5.0"
+derive-where = "1.6.0"
derive_more = { version = "1.0.0", features = ["full"] }
dialoguer = "0.10.4"
digest = "0.10.7"
@@ -255,7 +258,6 @@ ed25519-dalek = "=1.0.1"
ed25519-dalek-bip32 = "0.2.0"
enum-iterator = "1.5.0"
env_logger = "0.11.8"
-etcd-client = "0.11.1"
fast-math = "0.1"
fd-lock = "3.0.13"
five8_const = "0.1.4"
@@ -274,18 +276,18 @@ hidapi = { version = "2.6.3", default-features = false }
histogram = "0.6.9"
hmac = "0.12.1"
http = "0.2.12"
-humantime = "2.2.0"
+humantime = "2.3.0"
hyper = "0.14.32"
hyper-proxy = "0.9.1"
im = "15.1.0"
-indexmap = "2.10.0"
+indexmap = "2.11.4"
indicatif = "0.18.0"
-io-uring = "0.7.9"
+io-uring = "0.7.10"
itertools = "0.12.1"
jemallocator = { package = "tikv-jemallocator", version = "0.6.0", features = [
"unprefixed_malloc_on_supported_platforms",
] }
-js-sys = "0.3.77"
+js-sys = "0.3.80"
json5 = "0.4.1"
jsonrpc-core = "18.0.0"
jsonrpc-core-client = "18.0.0"
@@ -294,17 +296,17 @@ jsonrpc-http-server = "18.0.0"
jsonrpc-ipc-server = "18.0.0"
jsonrpc-pubsub = "18.0.0"
lazy-lru = "0.1.3"
-libc = "0.2.174"
+libc = "0.2.175"
libloading = "0.7.4"
libsecp256k1 = { version = "0.6.0", default-features = false, features = [
"std",
"static-context",
] }
light-poseidon = "0.2.0"
-log = "0.4.27"
+log = "0.4.28"
lru = "0.7.7"
lz4 = "1.28.1"
-memmap2 = "0.9.7"
+memmap2 = "0.9.8"
memoffset = "0.9"
merlin = { version = "3", default-features = false }
min-max-heap = "1.3.0"
@@ -326,38 +328,38 @@ predicates = "2.1"
pretty-hex = "0.3.0"
pretty_assertions = "1.4.1"
prio-graph = "0.3.0"
-proc-macro2 = "1.0.95"
-proptest = "1.7"
+proc-macro2 = "1.0.97"
+proptest = "1.8"
prost = "0.11.9"
prost-build = "0.11.9"
prost-types = "0.11.9"
protobuf-src = "1.1.0"
qstring = "0.7.2"
qualifier_attr = { version = "0.2.2", default-features = false }
-quinn = "0.11.8"
-quinn-proto = "0.11.12"
+quinn = "0.11.9"
+quinn-proto = "0.11.13"
quote = "1.0"
rand = "0.8.5"
rand0-7 = { package = "rand", version = "0.7" }
rand_chacha = "0.3.1"
rand_chacha0-2 = { package = "rand_chacha", version = "0.2.2" }
-rayon = "1.10.0"
+rayon = "1.11.0"
reed-solomon-erasure = "6.0.0"
-regex = "1.11.1"
-reqwest = { version = "0.12.22", default-features = false }
+regex = "1.11.2"
+reqwest = { version = "0.12.23", default-features = false }
reqwest-middleware = "0.4.2"
rolling-file = "0.2.0"
rpassword = "7.4"
-rustls = { version = "0.23.31", features = ["std"], default-features = false }
+rustls = { version = "0.23.32", features = ["std"], default-features = false }
scopeguard = "1.2.0"
-semver = "1.0.26"
+semver = "1.0.27"
seqlock = "0.2.0"
-serde = "1.0.219" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251
+serde = "1.0.226" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251
serde-big-array = "0.5.1"
-serde_bytes = "0.11.17"
-serde_derive = "1.0.219" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251
-serde_json = "1.0.142"
-serde_with = { version = "3.14.0", default-features = false }
+serde_bytes = "0.11.19"
+serde_derive = "1.0.224" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251
+serde_json = "1.0.145"
+serde_with = { version = "3.14.1", default-features = false }
serde_yaml = "0.9.34"
serial_test = "2.0.0"
sha2 = "0.10.9"
@@ -365,210 +367,212 @@ sha3 = "0.10.8"
shuttle = "0.7.1"
signal-hook = "0.3.18"
siphasher = "1.0.1"
-slab = "0.4.10"
-smallvec = "1.15.1"
+slab = "0.4.11"
+smallvec = { version = "1.15.1", default-features = false, features = ["union"] }
smpl_jwt = "0.7.1"
socket2 = "0.6.0"
soketto = "0.7"
-solana-account = "2.2.1"
-solana-account-decoder = { path = "account-decoder", version = "=3.0.0" }
-solana-account-decoder-client-types = { path = "account-decoder-client-types", version = "=3.0.0" }
-solana-account-info = "2.3.0"
-solana-accounts-db = { path = "accounts-db", version = "=3.0.0" }
-solana-address-lookup-table-interface = "2.2.2"
-solana-atomic-u64 = "2.2.1"
-solana-banks-client = { path = "banks-client", version = "=3.0.0" }
-solana-banks-interface = { path = "banks-interface", version = "=3.0.0" }
-solana-banks-server = { path = "banks-server", version = "=3.0.0" }
-solana-bench-tps = { path = "bench-tps", version = "=3.0.0" }
-solana-big-mod-exp = "2.2.1"
-solana-bincode = "2.2.1"
-solana-blake3-hasher = "2.2.1"
-solana-bloom = { path = "bloom", version = "=3.0.0" }
-solana-bn254 = "2.2.2"
-solana-borsh = "2.2.1"
-solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=3.0.0" }
-solana-bucket-map = { path = "bucket_map", version = "=3.0.0" }
-solana-builtins = { path = "builtins", version = "=3.0.0" }
-solana-builtins-default-costs = { path = "builtins-default-costs", version = "=3.0.0" }
-solana-clap-utils = { path = "clap-utils", version = "=3.0.0" }
-solana-clap-v3-utils = { path = "clap-v3-utils", version = "=3.0.0" }
-solana-cli = { path = "cli", version = "=3.0.0" }
-solana-cli-config = { path = "cli-config", version = "=3.0.0" }
-solana-cli-output = { path = "cli-output", version = "=3.0.0" }
-solana-client = { path = "client", version = "=3.0.0" }
-solana-client-traits = "2.2.1"
-solana-clock = "2.2.2"
-solana-cluster-type = "2.2.1"
-solana-commitment-config = "2.2.1"
-solana-compute-budget = { path = "compute-budget", version = "=3.0.0" }
-solana-compute-budget-instruction = { path = "compute-budget-instruction", version = "=3.0.0" }
-solana-compute-budget-interface = "2.2.2"
-solana-compute-budget-program = { path = "programs/compute-budget", version = "=3.0.0" }
-solana-config-interface = "1.0.0"
-solana-config-program-client = "1.1.0"
-solana-connection-cache = { path = "connection-cache", version = "=3.0.0", default-features = false }
-solana-core = { path = "core", version = "=3.0.0" }
-solana-cost-model = { path = "cost-model", version = "=3.0.0" }
-solana-cpi = "2.2.1"
-solana-curve25519 = { path = "curves/curve25519", version = "=3.0.0" }
-solana-define-syscall = "2.3.0"
-solana-derivation-path = "2.2.1"
-solana-download-utils = { path = "download-utils", version = "=3.0.0" }
-solana-ed25519-program = "2.2.3"
-solana-entry = { path = "entry", version = "=3.0.0" }
-solana-epoch-info = "2.2.1"
-solana-epoch-rewards = "2.2.1"
-solana-epoch-rewards-hasher = "2.2.1"
-solana-epoch-schedule = "2.2.1"
-solana-example-mocks = "2.2.1"
-solana-faucet = { path = "faucet", version = "=3.0.0" }
-solana-feature-gate-client = "0.0.2"
-solana-feature-gate-interface = "2.2.2"
-solana-fee = { path = "fee", version = "=3.0.0" }
-solana-fee-calculator = "2.2.1"
-solana-fee-structure = "2.3.0"
-solana-file-download = "2.2.2"
-solana-frozen-abi = "2.3.0"
-solana-frozen-abi-macro = "2.2.1"
-solana-genesis = { path = "genesis", version = "=3.0.0" }
-solana-genesis-config = "2.3.0"
-solana-genesis-utils = { path = "genesis-utils", version = "=3.0.0" }
-solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=3.0.0" }
-solana-gossip = { path = "gossip", version = "=3.0.0" }
-solana-hard-forks = "2.2.1"
-solana-hash = "2.3.0"
-solana-inflation = "2.2.1"
-solana-instruction = "2.3.0"
-solana-instructions-sysvar = "2.2.2"
-solana-keccak-hasher = "2.2.1"
-solana-keypair = "2.2.1"
-solana-last-restart-slot = "2.2.1"
-solana-lattice-hash = { path = "lattice-hash", version = "=3.0.0" }
-solana-ledger = { path = "ledger", version = "=3.0.0" }
-solana-loader-v2-interface = "2.2.1"
-solana-loader-v3-interface = "5.0.0"
-solana-loader-v4-interface = "2.2.1"
-solana-loader-v4-program = { path = "programs/loader-v4", version = "=3.0.0" }
-solana-local-cluster = { path = "local-cluster", version = "=3.0.0" }
-solana-log-collector = { path = "log-collector", version = "=3.0.0" }
-solana-logger = "2.3.1"
-solana-measure = { path = "measure", version = "=3.0.0" }
-solana-merkle-tree = { path = "merkle-tree", version = "=3.0.0" }
-solana-message = "2.4.0"
-solana-metrics = { path = "metrics", version = "=3.0.0" }
-solana-msg = "2.2.1"
-solana-native-token = "2.2.2"
-solana-net-utils = { path = "net-utils", version = "=3.0.0" }
+solana-account = "3.0.0"
+solana-account-decoder = { path = "account-decoder", version = "=3.1.0" }
+solana-account-decoder-client-types = { path = "account-decoder-client-types", version = "=3.1.0" }
+solana-account-info = "3.0.0"
+solana-accounts-db = { path = "accounts-db", version = "=3.1.0" }
+solana-address = "1.0.0"
+solana-address-lookup-table-interface = "3.0.0"
+solana-atomic-u64 = "3.0.0"
+solana-banks-client = { path = "banks-client", version = "=3.1.0" }
+solana-banks-interface = { path = "banks-interface", version = "=3.1.0" }
+solana-banks-server = { path = "banks-server", version = "=3.1.0" }
+solana-bench-tps = { path = "bench-tps", version = "=3.1.0" }
+solana-big-mod-exp = "3.0.0"
+solana-bincode = "3.0.0"
+solana-blake3-hasher = "3.0.0"
+solana-bloom = { path = "bloom", version = "=3.1.0" }
+solana-bls-signatures = { version = "0.3.0", features = ["serde"] }
+solana-bn254 = "3.0.0"
+solana-borsh = "3.0.0"
+solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=3.1.0" }
+solana-bucket-map = { path = "bucket_map", version = "=3.1.0" }
+solana-builtins = { path = "builtins", version = "=3.1.0" }
+solana-builtins-default-costs = { path = "builtins-default-costs", version = "=3.1.0" }
+solana-clap-utils = { path = "clap-utils", version = "=3.1.0" }
+solana-clap-v3-utils = { path = "clap-v3-utils", version = "=3.1.0" }
+solana-cli = { path = "cli", version = "=3.1.0" }
+solana-cli-config = { path = "cli-config", version = "=3.1.0" }
+solana-cli-output = { path = "cli-output", version = "=3.1.0" }
+solana-client = { path = "client", version = "=3.1.0" }
+solana-client-traits = "3.0.0"
+solana-clock = "3.0.0"
+solana-cluster-type = "3.0.0"
+solana-commitment-config = "3.0.0"
+solana-compute-budget = { path = "compute-budget", version = "=3.1.0" }
+solana-compute-budget-instruction = { path = "compute-budget-instruction", version = "=3.1.0" }
+solana-compute-budget-interface = "3.0.0"
+solana-compute-budget-program = { path = "programs/compute-budget", version = "=3.1.0" }
+solana-config-interface = "2.0.0"
+solana-connection-cache = { path = "connection-cache", version = "=3.1.0", default-features = false }
+solana-core = { path = "core", version = "=3.1.0" }
+solana-cost-model = { path = "cost-model", version = "=3.1.0" }
+solana-cpi = "3.0.0"
+solana-curve25519 = { path = "curves/curve25519", version = "=3.1.0" }
+solana-define-syscall = "3.0.0"
+solana-derivation-path = "3.0.0"
+solana-download-utils = { path = "download-utils", version = "=3.1.0" }
+solana-ed25519-program = "3.0.0"
+solana-entry = { path = "entry", version = "=3.1.0" }
+solana-epoch-info = "3.0.0"
+solana-epoch-rewards = "3.0.0"
+solana-epoch-rewards-hasher = "3.0.0"
+solana-epoch-schedule = "3.0.0"
+solana-example-mocks = "3.0.0"
+solana-faucet = { path = "faucet", version = "=3.1.0" }
+solana-feature-gate-interface = "3.0.0"
+solana-fee = { path = "fee", version = "=3.1.0" }
+solana-fee-calculator = "3.0.0"
+solana-fee-structure = "3.0.0"
+solana-file-download = "3.0.0"
+solana-frozen-abi = "3.0.0"
+solana-frozen-abi-macro = "3.0.0"
+solana-genesis = { path = "genesis", version = "=3.1.0" }
+solana-genesis-config = "3.0.0"
+solana-genesis-utils = { path = "genesis-utils", version = "=3.1.0" }
+solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=3.1.0" }
+solana-gossip = { path = "gossip", version = "=3.1.0" }
+solana-hard-forks = "3.0.0"
+solana-hash = "3.0.0"
+solana-inflation = "3.0.0"
+solana-instruction = "3.0.0"
+solana-instruction-error = "2.0.0"
+solana-instructions-sysvar = "3.0.0"
+solana-keccak-hasher = "3.0.0"
+solana-keypair = "3.0.1"
+solana-last-restart-slot = "3.0.0"
+solana-lattice-hash = { path = "lattice-hash", version = "=3.1.0" }
+solana-ledger = { path = "ledger", version = "=3.1.0" }
+solana-loader-v2-interface = "3.0.0"
+solana-loader-v3-interface = "6.1.0"
+solana-loader-v4-interface = "3.1.0"
+solana-loader-v4-program = { path = "programs/loader-v4", version = "=3.1.0" }
+solana-local-cluster = { path = "local-cluster", version = "=3.1.0" }
+solana-logger = "3.0.0"
+solana-measure = { path = "measure", version = "=3.1.0" }
+solana-merkle-tree = { path = "merkle-tree", version = "=3.1.0" }
+solana-message = "3.0.1"
+solana-metrics = { path = "metrics", version = "=3.1.0" }
+solana-msg = "3.0.0"
+solana-native-token = "3.0.0"
+solana-net-utils = { path = "net-utils", version = "=3.1.0" }
solana-nohash-hasher = "0.2.1"
-solana-nonce = "2.2.1"
-solana-nonce-account = "2.2.1"
-solana-notifier = { path = "notifier", version = "=3.0.0" }
-solana-offchain-message = "2.2.1"
-solana-packet = "2.2.1"
-solana-perf = { path = "perf", version = "=3.0.0" }
-solana-poh = { path = "poh", version = "=3.0.0" }
-solana-poh-config = "2.2.1"
-solana-poseidon = { path = "poseidon", version = "=3.0.0" }
-solana-precompile-error = "2.2.2"
-solana-presigner = "2.2.1"
-solana-program = { version = "2.3.0", default-features = false }
-solana-program-entrypoint = "2.3.0"
-solana-program-error = "2.2.2"
-solana-program-memory = "2.3.1"
-solana-program-option = "2.2.1"
-solana-program-pack = "2.2.1"
-solana-program-runtime = { path = "program-runtime", version = "=3.0.0" }
-solana-program-test = { path = "program-test", version = "=3.0.0" }
-solana-pubkey = { version = "2.4.0", default-features = false }
-solana-pubsub-client = { path = "pubsub-client", version = "=3.0.0" }
-solana-quic-client = { path = "quic-client", version = "=3.0.0" }
-solana-quic-definitions = "2.3.0"
-solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=3.0.0" }
-solana-remote-wallet = { path = "remote-wallet", version = "=3.0.0", default-features = false }
-solana-rent = "2.2.1"
-solana-rent-collector = "2.2.1"
-solana-reward-info = "2.2.1"
-solana-rpc = { path = "rpc", version = "=3.0.0" }
-solana-rpc-client = { path = "rpc-client", version = "=3.0.0", default-features = false }
-solana-rpc-client-api = { path = "rpc-client-api", version = "=3.0.0" }
-solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=3.0.0" }
-solana-rpc-client-types = { path = "rpc-client-types", version = "=3.0.0" }
-solana-runtime = { path = "runtime", version = "=3.0.0" }
-solana-runtime-transaction = { path = "runtime-transaction", version = "=3.0.0" }
-solana-sanitize = "2.2.1"
-solana-sbpf = "=0.12.0"
-solana-sdk-ids = "2.2.1"
-solana-secp256k1-program = "2.2.3"
-solana-secp256k1-recover = "2.2.1"
-solana-secp256r1-program = "2.2.4"
-solana-seed-derivable = "2.2.1"
-solana-seed-phrase = "2.2.1"
-solana-send-transaction-service = { path = "send-transaction-service", version = "=3.0.0" }
-solana-serde = "2.2.1"
-solana-serde-varint = "2.2.2"
-solana-serialize-utils = "2.2.1"
-solana-sha256-hasher = "2.3.0"
-solana-short-vec = "2.2.1"
-solana-shred-version = "2.2.1"
-solana-signature = { version = "2.3.0", default-features = false }
-solana-signer = "2.2.1"
-solana-slot-hashes = "2.2.1"
-solana-slot-history = "2.2.1"
-solana-stable-layout = "2.2.1"
-solana-stake-interface = { version = "1.2.1" }
-solana-stake-program = { path = "programs/stake", version = "=3.0.0" }
-solana-storage-bigtable = { path = "storage-bigtable", version = "=3.0.0" }
-solana-storage-proto = { path = "storage-proto", version = "=3.0.0" }
-solana-streamer = { path = "streamer", version = "=3.0.0" }
-solana-svm = { path = "svm", version = "=3.0.0" }
-solana-svm-callback = { path = "svm-callback", version = "=3.0.0" }
-solana-svm-conformance = { path = "svm-conformance", version = "=3.0.0" }
-solana-svm-feature-set = { path = "svm-feature-set", version = "=3.0.0" }
-solana-svm-transaction = { path = "svm-transaction", version = "=3.0.0" }
-solana-system-interface = "1.0"
-solana-system-program = { path = "programs/system", version = "=3.0.0" }
-solana-system-transaction = "2.2.1"
-solana-sysvar = "2.2.2"
-solana-sysvar-id = "2.2.1"
-solana-test-validator = { path = "test-validator", version = "=3.0.0" }
-solana-time-utils = "2.2.1"
-solana-timings = { path = "timings", version = "=3.0.0" }
-solana-tls-utils = { path = "tls-utils", version = "=3.0.0" }
-solana-tps-client = { path = "tps-client", version = "=3.0.0" }
-solana-tpu-client = { path = "tpu-client", version = "=3.0.0", default-features = false }
-solana-tpu-client-next = { path = "tpu-client-next", version = "=3.0.0" }
-solana-transaction = "2.2.3"
-solana-transaction-context = { path = "transaction-context", version = "=3.0.0", features = ["bincode"] }
-solana-transaction-error = "2.2.1"
-solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", version = "=3.0.0" }
-solana-transaction-status = { path = "transaction-status", version = "=3.0.0" }
-solana-transaction-status-client-types = { path = "transaction-status-client-types", version = "=3.0.0" }
-solana-turbine = { path = "turbine", version = "=3.0.0" }
-solana-type-overrides = { path = "type-overrides", version = "=3.0.0" }
-solana-udp-client = { path = "udp-client", version = "=3.0.0" }
-solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=3.0.0" }
-solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=3.0.0" }
-solana-validator-exit = "2.2.1"
-solana-version = { path = "version", version = "=3.0.0" }
-solana-vote = { path = "vote", version = "=3.0.0" }
-solana-vote-interface = "2.2.6"
-solana-vote-program = { path = "programs/vote", version = "=3.0.0", default-features = false }
-solana-wen-restart = { path = "wen-restart", version = "=3.0.0" }
-solana-zk-elgamal-proof-program = { path = "programs/zk-elgamal-proof", version = "=3.0.0" }
-solana-zk-keygen = { path = "zk-keygen", version = "=3.0.0" }
-solana-zk-sdk = "3.0.0"
-solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=3.0.0" }
-solana-zk-token-sdk = { path = "zk-token-sdk", version = "=3.0.0" }
-spl-associated-token-account-interface = "1.0.0"
-spl-generic-token = "1.0.1"
-spl-memo-interface = "1.0.0"
-spl-pod = "0.5.1"
-spl-token-2022-interface = "1.0.0"
-spl-token-confidential-transfer-proof-extraction = "0.4.0"
-spl-token-group-interface = "0.6.0"
-spl-token-interface = "1.0.0"
-spl-token-metadata-interface = "0.7.0"
+solana-nonce = "3.0.0"
+solana-nonce-account = "3.0.0"
+solana-notifier = { path = "notifier", version = "=3.1.0" }
+solana-offchain-message = "3.0.0"
+solana-packet = "3.0.0"
+solana-perf = { path = "perf", version = "=3.1.0" }
+solana-poh = { path = "poh", version = "=3.1.0" }
+solana-poh-config = "3.0.0"
+solana-poseidon = { path = "poseidon", version = "=3.1.0" }
+solana-precompile-error = "3.0.0"
+solana-presigner = "3.0.0"
+solana-program = { version = "3.0.0", default-features = false }
+solana-program-binaries = { path = "program-binaries", version = "=3.1.0" }
+solana-program-entrypoint = "3.1.0"
+solana-program-error = "3.0.0"
+solana-program-memory = "3.0.0"
+solana-program-option = "3.0.0"
+solana-program-pack = "3.0.0"
+solana-program-runtime = { path = "program-runtime", version = "=3.1.0" }
+solana-program-test = { path = "program-test", version = "=3.1.0" }
+solana-pubkey = { version = "3.0.0", default-features = false }
+solana-pubsub-client = { path = "pubsub-client", version = "=3.1.0" }
+solana-quic-client = { path = "quic-client", version = "=3.1.0" }
+solana-quic-definitions = "3.0.0"
+solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=3.1.0" }
+solana-remote-wallet = { path = "remote-wallet", version = "=3.1.0", default-features = false }
+solana-rent = "3.0.0"
+solana-reward-info = "3.0.0"
+solana-rpc = { path = "rpc", version = "=3.1.0" }
+solana-rpc-client = { path = "rpc-client", version = "=3.1.0", default-features = false }
+solana-rpc-client-api = { path = "rpc-client-api", version = "=3.1.0" }
+solana-rpc-client-nonce-utils = { path = "rpc-client-nonce-utils", version = "=3.1.0" }
+solana-rpc-client-types = { path = "rpc-client-types", version = "=3.1.0" }
+solana-runtime = { path = "runtime", version = "=3.1.0" }
+solana-runtime-transaction = { path = "runtime-transaction", version = "=3.1.0" }
+solana-sanitize = "3.0.1"
+solana-sbpf = { version = "=0.12.2", default-features = false }
+solana-sdk-ids = "3.0.0"
+solana-secp256k1-program = "3.0.0"
+solana-secp256k1-recover = "3.0.0"
+solana-secp256r1-program = "3.0.0"
+solana-seed-derivable = "3.0.0"
+solana-seed-phrase = "3.0.0"
+solana-send-transaction-service = { path = "send-transaction-service", version = "=3.1.0" }
+solana-serde = "3.0.0"
+solana-serde-varint = "3.0.0"
+solana-serialize-utils = "3.1.0"
+solana-sha256-hasher = "3.0.0"
+solana-short-vec = "3.0.0"
+solana-shred-version = "3.0.0"
+solana-signature = { version = "3.1.0", default-features = false }
+solana-signer = "3.0.0"
+solana-signer-store = "0.1.0"
+solana-slot-hashes = "3.0.0"
+solana-slot-history = "3.0.0"
+solana-stable-layout = "3.0.0"
+solana-stake-interface = { version = "2.0.1" }
+solana-stake-program = { path = "programs/stake", version = "=3.1.0" }
+solana-storage-bigtable = { path = "storage-bigtable", version = "=3.1.0" }
+solana-storage-proto = { path = "storage-proto", version = "=3.1.0" }
+solana-streamer = { path = "streamer", version = "=3.1.0" }
+solana-svm = { path = "svm", version = "=3.1.0" }
+solana-svm-callback = { path = "svm-callback", version = "=3.1.0" }
+solana-svm-feature-set = { path = "svm-feature-set", version = "=3.1.0" }
+solana-svm-log-collector = { path = "svm-log-collector", version = "=3.1.0" }
+solana-svm-measure = { path = "svm-measure", version = "=3.1.0" }
+solana-svm-timings = { path = "svm-timings", version = "=3.1.0" }
+solana-svm-transaction = { path = "svm-transaction", version = "=3.1.0" }
+solana-svm-type-overrides = { path = "svm-type-overrides", version = "=3.1.0" }
+solana-system-interface = "2.0"
+solana-system-program = { path = "programs/system", version = "=3.1.0" }
+solana-system-transaction = "3.0.0"
+solana-sysvar = "3.0.0"
+solana-sysvar-id = "3.0.0"
+solana-test-validator = { path = "test-validator", version = "=3.1.0" }
+solana-time-utils = "3.0.0"
+solana-tls-utils = { path = "tls-utils", version = "=3.1.0" }
+solana-tps-client = { path = "tps-client", version = "=3.1.0" }
+solana-tpu-client = { path = "tpu-client", version = "=3.1.0", default-features = false }
+solana-tpu-client-next = { path = "tpu-client-next", version = "=3.1.0" }
+solana-transaction = "3.0.1"
+solana-transaction-context = { path = "transaction-context", version = "=3.1.0", features = ["bincode"] }
+solana-transaction-error = "3.0.0"
+solana-transaction-metrics-tracker = { path = "transaction-metrics-tracker", version = "=3.1.0" }
+solana-transaction-status = { path = "transaction-status", version = "=3.1.0" }
+solana-transaction-status-client-types = { path = "transaction-status-client-types", version = "=3.1.0" }
+solana-turbine = { path = "turbine", version = "=3.1.0" }
+solana-udp-client = { path = "udp-client", version = "=3.1.0" }
+solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=3.1.0" }
+solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=3.1.0" }
+solana-validator-exit = "3.0.0"
+solana-version = { path = "version", version = "=3.1.0" }
+solana-vote = { path = "vote", version = "=3.1.0" }
+solana-vote-interface = "4.0.2"
+solana-vote-program = { path = "programs/vote", version = "=3.1.0", default-features = false }
+solana-votor-messages = { path = "votor-messages", version = "=3.1.0" }
+solana-wen-restart = { path = "wen-restart", version = "=3.1.0" }
+solana-zk-elgamal-proof-program = { path = "programs/zk-elgamal-proof", version = "=3.1.0" }
+solana-zk-sdk = "4.0.0"
+solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=3.1.0" }
+solana-zk-token-sdk = { path = "zk-token-sdk", version = "=3.1.0" }
+spl-associated-token-account-interface = "2.0.0"
+spl-generic-token = "2.0.0"
+spl-memo-interface = "2.0.0"
+spl-pod = "0.7.0"
+spl-token-2022-interface = "2.0.0"
+spl-token-confidential-transfer-proof-extraction = "0.5.0"
+spl-token-group-interface = "0.7.0"
+spl-token-interface = "2.0.0"
+spl-token-metadata-interface = "0.8.0"
static_assertions = "1.1.0"
stream-cancel = "0.8.2"
strum = "0.24"
@@ -581,9 +585,9 @@ sysctl = "0.4.6"
systemstat = "0.2.5"
tar = "0.4.44"
tarpc = "0.29.0"
-tempfile = "3.20.0"
+tempfile = "3.22.0"
test-case = "3.3.1"
-thiserror = "2.0.12"
+thiserror = "2.0.16"
thread-priority = "1.2.0"
tiny-bip39 = "0.8.2"
tokio = "1.47.1"
@@ -595,12 +599,13 @@ toml = "0.8.12"
tonic = "0.9.2"
tonic-build = "0.9.2"
tower = "0.5.2"
+tracing = "0.1"
trait-set = "0.3.0"
trees = "0.4.2"
tungstenite = "0.20.1"
unwrap_none = "0.1.2"
uriparse = "0.6.4"
-url = "2.5.4"
+url = "2.5.7"
vec_extract_if_polyfill = "0.1.0"
wasm-bindgen = "0.2"
winapi = "0.3.8"
diff --git a/README.md b/README.md
index e1cb6c29c049bb..b7a563b63d033c 100644
--- a/README.md
+++ b/README.md
@@ -19,17 +19,9 @@ $ source $HOME/.cargo/env
$ rustup component add rustfmt
```
-When building the master branch, please make sure you are using the latest stable rust version by running:
-
-```bash
-$ rustup update
-```
-
-When building a specific release branch, you should check the rust version in `ci/rust-version.sh` and if necessary, install that version by running:
-```bash
-$ rustup install VERSION
-```
-Note that if this is not the latest rust version on your machine, cargo commands may require an [override](https://rust-lang.github.io/rustup/overrides.html) in order to use the correct version.
+The `rust-toolchain.toml` file pins a specific rust version and ensures that
+cargo commands run with that version. Note that cargo will automatically install
+the correct version if it is not already installed.
On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, protobuf etc.
@@ -70,12 +62,12 @@ $ ./cargo test
### Starting a local testnet
-Start your own testnet locally, instructions are in the [online docs](https://docs.solanalabs.com/clusters/benchmark).
+Start your own testnet locally, instructions are in the [online docs](https://docs.anza.xyz/clusters/benchmark).
### Accessing the remote development cluster
* `devnet` - stable public cluster for development accessible via
-devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solanalabs.com/clusters)
+devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.anza.xyz/clusters)
# Benchmarking
diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml
index b6ba7fd8a67dd9..d94c96a1954b15 100644
--- a/account-decoder/Cargo.toml
+++ b/account-decoder/Cargo.toml
@@ -28,7 +28,7 @@ solana-address-lookup-table-interface = { workspace = true, features = [
"bytemuck",
] }
solana-clock = { workspace = true }
-solana-config-program-client = { workspace = true, features = ["serde"] }
+solana-config-interface = { workspace = true, features = ["bincode"] }
solana-epoch-schedule = { workspace = true }
solana-fee-calculator = { workspace = true }
solana-instruction = { workspace = true }
@@ -41,7 +41,7 @@ solana-rent = { workspace = true }
solana-sdk-ids = { workspace = true }
solana-slot-hashes = { workspace = true }
solana-slot-history = { workspace = true }
-solana-stake-interface = { workspace = true }
+solana-stake-interface = { workspace = true, features = ["bincode", "sysvar"] }
solana-sysvar = { workspace = true }
solana-vote-interface = { workspace = true, features = ["bincode"] }
spl-generic-token = { workspace = true }
diff --git a/account-decoder/src/parse_account_data.rs b/account-decoder/src/parse_account_data.rs
index 807cedee64eba8..5b1d0bf5279a3e 100644
--- a/account-decoder/src/parse_account_data.rs
+++ b/account-decoder/src/parse_account_data.rs
@@ -165,7 +165,7 @@ mod test {
},
solana_vote_interface::{
program::id as vote_program_id,
- state::{VoteState, VoteStateVersions},
+ state::{VoteStateV3, VoteStateVersions},
},
};
@@ -176,10 +176,10 @@ mod test {
let data = vec![0; 4];
assert!(parse_account_data_v3(&account_pubkey, &other_program, &data, None).is_err());
- let vote_state = VoteState::default();
- let mut vote_account_data: Vec = vec![0; VoteState::size_of()];
- let versioned = VoteStateVersions::new_current(vote_state);
- VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
+ let vote_state = VoteStateV3::default();
+ let mut vote_account_data: Vec = vec![0; VoteStateV3::size_of()];
+ let versioned = VoteStateVersions::new_v3(vote_state);
+ VoteStateV3::serialize(&versioned, &mut vote_account_data).unwrap();
let parsed = parse_account_data_v3(
&account_pubkey,
&vote_program_id(),
@@ -188,7 +188,7 @@ mod test {
)
.unwrap();
assert_eq!(parsed.program, "vote".to_string());
- assert_eq!(parsed.space, VoteState::size_of() as u64);
+ assert_eq!(parsed.space, VoteStateV3::size_of() as u64);
let nonce_data = Versions::new(State::Initialized(Data::default()));
let nonce_account_data = bincode::serialize(&nonce_data).unwrap();
diff --git a/account-decoder/src/parse_config.rs b/account-decoder/src/parse_config.rs
index 4f03503806de36..08be68fc42bc26 100644
--- a/account-decoder/src/parse_config.rs
+++ b/account-decoder/src/parse_config.rs
@@ -5,7 +5,7 @@ use {
},
bincode::deserialize,
serde_json::Value,
- solana_config_program_client::{get_config_data, ConfigKeys},
+ solana_config_interface::state::{get_config_data, ConfigKeys},
solana_pubkey::Pubkey,
solana_stake_interface::config::{
Config as StakeConfig, {self as stake_config},
@@ -101,7 +101,6 @@ mod test {
bincode::serialize,
serde_json::json,
solana_account::{Account, AccountSharedData, ReadableAccount},
- solana_config_program_client::ConfigKeys,
};
fn create_config_account(
diff --git a/account-decoder/src/parse_sysvar.rs b/account-decoder/src/parse_sysvar.rs
index c0b260f1542010..9a5c70c89d0a3b 100644
--- a/account-decoder/src/parse_sysvar.rs
+++ b/account-decoder/src/parse_sysvar.rs
@@ -14,11 +14,9 @@ use {
solana_sdk_ids::sysvar,
solana_slot_hashes::SlotHashes,
solana_slot_history::{self as slot_history, SlotHistory},
+ solana_stake_interface::stake_history::{StakeHistory, StakeHistoryEntry},
solana_sysvar::{
- epoch_rewards::EpochRewards,
- last_restart_slot::LastRestartSlot,
- rewards::Rewards,
- stake_history::{StakeHistory, StakeHistoryEntry},
+ epoch_rewards::EpochRewards, last_restart_slot::LastRestartSlot, rewards::Rewards,
},
};
diff --git a/account-decoder/src/parse_vote.rs b/account-decoder/src/parse_vote.rs
index aca75b74f8354d..3dfb5aa63bce55 100644
--- a/account-decoder/src/parse_vote.rs
+++ b/account-decoder/src/parse_vote.rs
@@ -2,11 +2,11 @@ use {
crate::{parse_account_data::ParseAccountError, StringAmount},
solana_clock::{Epoch, Slot},
solana_pubkey::Pubkey,
- solana_vote_interface::state::{BlockTimestamp, Lockout, VoteState},
+ solana_vote_interface::state::{BlockTimestamp, Lockout, VoteStateV3},
};
pub fn parse_vote(data: &[u8]) -> Result {
- let mut vote_state = VoteState::deserialize(data).map_err(ParseAccountError::from)?;
+ let mut vote_state = VoteStateV3::deserialize(data).map_err(ParseAccountError::from)?;
let epoch_credits = vote_state
.epoch_credits()
.iter()
@@ -125,10 +125,10 @@ mod test {
#[test]
fn test_parse_vote() {
- let vote_state = VoteState::default();
- let mut vote_account_data: Vec = vec![0; VoteState::size_of()];
- let versioned = VoteStateVersions::new_current(vote_state);
- VoteState::serialize(&versioned, &mut vote_account_data).unwrap();
+ let vote_state = VoteStateV3::default();
+ let mut vote_account_data: Vec = vec![0; VoteStateV3::size_of()];
+ let versioned = VoteStateVersions::new_v3(vote_state);
+ VoteStateV3::serialize(&versioned, &mut vote_account_data).unwrap();
let expected_vote_state = UiVoteState {
node_pubkey: Pubkey::default().to_string(),
authorized_withdrawer: Pubkey::default().to_string(),
diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml
index f016eb135c84d8..5af427f4fdf5ac 100644
--- a/accounts-cluster-bench/Cargo.toml
+++ b/accounts-cluster-bench/Cargo.toml
@@ -11,6 +11,9 @@ edition = { workspace = true }
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
+[features]
+dev-context-only-utils = []
+
[dependencies]
clap = { workspace = true }
log = { workspace = true }
@@ -50,8 +53,8 @@ jemallocator = { workspace = true }
[dev-dependencies]
solana-accounts-db = { workspace = true }
solana-core = { workspace = true, features = ["dev-context-only-utils"] }
-solana-faucet = { workspace = true }
-solana-local-cluster = { workspace = true }
+solana-faucet = { workspace = true, features = ["dev-context-only-utils"] }
+solana-local-cluster = { workspace = true, features = ["dev-context-only-utils"] }
solana-native-token = { workspace = true }
solana-poh-config = { workspace = true }
solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
diff --git a/accounts-cluster-bench/src/main.rs b/accounts-cluster-bench/src/main.rs
index bdbe2cf4813cce..abc6a49fa76b87 100644
--- a/accounts-cluster-bench/src/main.rs
+++ b/accounts-cluster-bench/src/main.rs
@@ -61,10 +61,7 @@ pub fn poll_slot_height(client: &RpcClient) -> Slot {
return slot;
} else {
num_retries -= 1;
- warn!(
- "get_slot_height failure: {:?}. remaining retries {}",
- response, num_retries
- );
+ warn!("get_slot_height failure: {response:?}. remaining retries {num_retries}");
}
if num_retries == 0 {
panic!("failed to get_slot_height(), rpc node down?")
@@ -81,10 +78,7 @@ pub fn poll_get_latest_blockhash(client: &RpcClient) -> Option {
return Some(blockhash);
} else {
num_retries -= 1;
- warn!(
- "get_latest_blockhash failure: {:?}. remaining retries {}",
- response, num_retries
- );
+ warn!("get_latest_blockhash failure: {response:?}. remaining retries {num_retries}");
}
if num_retries == 0 {
panic!("failed to get_latest_blockhash(), rpc node down?")
@@ -102,10 +96,7 @@ pub fn poll_get_fee_for_message(client: &RpcClient, message: &mut Message) -> (O
return (Some(fee), message.recent_blockhash);
} else {
num_retries -= 1;
- warn!(
- "get_fee_for_message failure: {:?}. remaining retries {}",
- response, num_retries
- );
+ warn!("get_fee_for_message failure: {response:?}. remaining retries {num_retries}");
let blockhash = poll_get_latest_blockhash(client).expect("blockhash");
message.recent_blockhash = blockhash;
@@ -119,7 +110,7 @@ pub fn poll_get_fee_for_message(client: &RpcClient, message: &mut Message) -> (O
fn airdrop_lamports(client: &RpcClient, id: &Keypair, desired_balance: u64) -> bool {
let starting_balance = client.get_balance(&id.pubkey()).unwrap_or(0);
- info!("starting balance {}", starting_balance);
+ info!("starting balance {starting_balance}");
if starting_balance < desired_balance {
let airdrop_amount = desired_balance - starting_balance;
@@ -143,7 +134,7 @@ fn airdrop_lamports(client: &RpcClient, id: &Keypair, desired_balance: u64) -> b
let current_balance = client.get_balance(&id.pubkey()).unwrap_or_else(|e| {
panic!("airdrop error {e}");
});
- info!("current balance {}...", current_balance);
+ info!("current balance {current_balance}...");
if current_balance - starting_balance != airdrop_amount {
info!(
@@ -396,10 +387,10 @@ fn process_get_multiple_accounts(
stats.total_errors_time_us += rpc_time.as_us();
stats.errors += 1;
if last_error.elapsed().as_secs() > 2 {
- info!("error: {:?}", e);
+ info!("error: {e:?}");
*last_error = Instant::now();
}
- debug!("error: {:?}", e);
+ debug!("error: {e:?}");
}
}
}
@@ -519,7 +510,7 @@ fn run_rpc_bench_loop(
stats.total_errors_time_us += rpc_time.as_us();
stats.errors += 1;
if last_error.elapsed().as_secs() > 2 {
- info!("get_account_info error: {:?}", e);
+ info!("get_account_info error: {e:?}");
last_error = Instant::now();
}
}
@@ -545,7 +536,7 @@ fn run_rpc_bench_loop(
stats.total_errors_time_us += rpc_time.as_us();
stats.errors += 1;
if last_error.elapsed().as_secs() > 2 {
- info!("get_block error: {:?}", e);
+ info!("get_block error: {e:?}");
last_error = Instant::now();
}
}
@@ -569,7 +560,7 @@ fn run_rpc_bench_loop(
stats.total_errors_time_us += rpc_time.as_us();
stats.errors += 1;
if last_error.elapsed().as_secs() > 2 {
- info!("get_blocks error: {:?}", e);
+ info!("get_blocks error: {e:?}");
last_error = Instant::now();
}
}
@@ -588,7 +579,7 @@ fn run_rpc_bench_loop(
stats.total_errors_time_us += rpc_time.as_us();
stats.errors += 1;
if last_error.elapsed().as_secs() > 2 {
- info!("get_first_available_block error: {:?}", e);
+ info!("get_first_available_block error: {e:?}");
last_error = Instant::now();
}
}
@@ -607,7 +598,7 @@ fn run_rpc_bench_loop(
stats.total_errors_time_us += rpc_time.as_us();
stats.errors += 1;
if last_error.elapsed().as_secs() > 2 {
- info!("get_slot error: {:?}", e);
+ info!("get_slot error: {e:?}");
last_error = Instant::now();
}
}
@@ -626,7 +617,7 @@ fn run_rpc_bench_loop(
stats.total_errors_time_us += rpc_time.as_us();
stats.errors += 1;
if last_error.elapsed().as_secs() > 2 {
- info!("get_token_supply error: {:?}", e);
+ info!("get_token_supply error: {e:?}");
last_error = Instant::now();
}
}
@@ -659,7 +650,7 @@ fn run_rpc_bench_loop(
stats.errors += 1;
stats.total_errors_time_us += rpc_time.as_us();
if last_error.elapsed().as_secs() > 2 {
- info!("get-program-accounts error: {:?}", e);
+ info!("get-program-accounts error: {e:?}");
last_error = Instant::now();
}
}
@@ -679,7 +670,7 @@ fn run_rpc_bench_loop(
stats.errors += 1;
stats.total_errors_time_us += rpc_time.as_us();
if last_error.elapsed().as_secs() > 2 {
- info!("get-token-accounts-by-delegate error: {:?}", e);
+ info!("get-token-accounts-by-delegate error: {e:?}");
last_error = Instant::now();
}
}
@@ -699,7 +690,7 @@ fn run_rpc_bench_loop(
stats.errors += 1;
stats.total_errors_time_us += rpc_time.as_us();
if last_error.elapsed().as_secs() > 2 {
- info!("get-token-accounts-by-owner error: {:?}", e);
+ info!("get-token-accounts-by-owner error: {e:?}");
last_error = Instant::now();
}
}
@@ -781,7 +772,7 @@ fn make_rpc_bench_threads(
let transaction_signature_tracker = transaction_signature_tracker.clone();
let mint = *mint;
Builder::new()
- .name(format!("rpc-bench-{}", thread))
+ .name(format!("rpc-bench-{thread}"))
.spawn(move || {
start_bench.wait();
run_rpc_bench_loop(
@@ -853,7 +844,7 @@ fn run_accounts_bench(
let transaction_signature_tracker =
TransactionSignatureTracker(Arc::new(RwLock::new(VecDeque::with_capacity(5000))));
- info!("Starting balance(s): {:?}", balances);
+ info!("Starting balance(s): {balances:?}");
let executor = TransactionExecutor::new_with_rpc_client(client.clone());
@@ -917,10 +908,7 @@ fn run_accounts_bench(
}
last_balance = Instant::now();
if *balance < lamports * 2 {
- info!(
- "Balance {} is less than needed: {}, doing airdrop...",
- balance, lamports
- );
+ info!("Balance {balance} is less than needed: {lamports}, doing airdrop...");
if !airdrop_lamports(&client, payer_keypairs[i], lamports * 100_000) {
warn!("failed airdrop, exiting");
return;
@@ -934,7 +922,7 @@ fn run_accounts_bench(
if sigs_len < batch_size {
let num_to_create = batch_size - sigs_len;
if num_to_create >= payer_keypairs.len() {
- info!("creating {} new", num_to_create);
+ info!("creating {num_to_create} new");
let chunk_size = num_to_create / payer_keypairs.len();
if chunk_size > 0 {
for (i, keypair) in payer_keypairs.iter().enumerate() {
@@ -1018,8 +1006,9 @@ fn run_accounts_bench(
|| max_accounts_met
{
info!(
- "total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
- total_accounts_created, total_accounts_closed, tx_sent_count, count, balances
+ "total_accounts_created: {total_accounts_created} total_accounts_closed: \
+ {total_accounts_closed} tx_sent_count: {tx_sent_count} loop_count: {count} \
+ balance(s): {balances:?}"
);
last_log = Instant::now();
}
@@ -1061,9 +1050,9 @@ fn run_accounts_bench(
(max_created_seed - max_closed_seed) as usize,
);
if num_to_close >= payer_keypairs.len() {
- info!("closing {} accounts", num_to_close);
+ info!("closing {num_to_close} accounts");
let chunk_size = num_to_close / payer_keypairs.len();
- info!("{:?} chunk_size", chunk_size);
+ info!("{chunk_size:?} chunk_size");
if chunk_size > 0 {
for (i, keypair) in payer_keypairs.iter().enumerate() {
let txs: Vec<_> = (0..chunk_size)
@@ -1101,8 +1090,8 @@ fn run_accounts_bench(
count += 1;
if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed {
info!(
- "total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}",
- total_accounts_closed, tx_sent_count, count, balances
+ "total_accounts_closed: {total_accounts_closed} tx_sent_count: \
+ {tx_sent_count} loop_count: {count} balance(s): {balances:?}"
);
last_log = Instant::now();
}
@@ -1150,8 +1139,8 @@ fn main() {
.validator(is_url_or_moniker)
.conflicts_with("entrypoint")
.help(
- "URL for Solana's JSON RPC or moniker (or their first letter): \
- [mainnet-beta, testnet, devnet, localhost]",
+ "URL for Solana's JSON RPC or moniker (or their first letter): [mainnet-beta, \
+ testnet, devnet, localhost]",
),
)
.arg(
@@ -1206,10 +1195,9 @@ fn main() {
.takes_value(true)
.value_name("BYTES")
.help(
- "Every `n` batches, create a batch of close transactions for \
- the earliest remaining batch of accounts created. \
- Note: Should be > 1 to avoid situations where the close \
- transactions will be submitted before the corresponding \
+ "Every `n` batches, create a batch of close transactions for the earliest \
+ remaining batch of accounts created. Note: Should be > 1 to avoid situations \
+ where the close transactions will be submitted before the corresponding \
create transactions have been confirmed",
),
)
@@ -1232,7 +1220,10 @@ fn main() {
.long("max-accounts")
.takes_value(true)
.value_name("NUM_ACCOUNTS")
- .help("Halt after client has created this number of accounts. Does not count closed accounts."),
+ .help(
+ "Halt after client has created this number of accounts. Does not count closed \
+ accounts.",
+ ),
)
.arg(
Arg::with_name("check_gossip")
@@ -1273,10 +1264,7 @@ fn main() {
.takes_value(true)
.value_name("RPC_BENCH_TYPE(S)")
.multiple(true)
- .requires_ifs(&[
- ("supply", "mint"),
- ("token-accounts-by-owner", "mint"),
- ])
+ .requires_ifs(&[("supply", "mint"), ("token-accounts-by-owner", "mint")])
.help("Spawn a thread which calls a specific RPC method in a loop to benchmark it"),
)
.get_matches();
@@ -1333,7 +1321,7 @@ fn main() {
Some(
solana_net_utils::get_cluster_shred_version(&entrypoint_addr).unwrap_or_else(
|err| {
- eprintln!("Failed to get shred version: {}", err);
+ eprintln!("Failed to get shred version: {err}");
exit(1);
},
),
@@ -1344,7 +1332,7 @@ fn main() {
};
let rpc_addr = if !skip_gossip {
- info!("Finding cluster entry: {:?}", entrypoint_addr);
+ info!("Finding cluster entry: {entrypoint_addr:?}");
let (gossip_nodes, _validators) = discover(
None, // keypair
Some(&entrypoint_addr),
@@ -1364,7 +1352,7 @@ fn main() {
info!("done found {} nodes", gossip_nodes.len());
gossip_nodes[0].rpc().unwrap()
} else {
- info!("Using {:?} as the RPC address", entrypoint_addr);
+ info!("Using {entrypoint_addr:?} as the RPC address");
entrypoint_addr
};
@@ -1409,18 +1397,15 @@ fn main() {
pub mod test {
use {
super::*,
- solana_accounts_db::{
- accounts_db::ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS,
- accounts_index::{AccountIndex, AccountSecondaryIndexes},
- },
+ solana_accounts_db::accounts_index::{AccountIndex, AccountSecondaryIndexes},
solana_core::validator::ValidatorConfig,
- solana_faucet::faucet::run_local_faucet,
+ solana_faucet::faucet::run_local_faucet_for_tests,
solana_local_cluster::{
local_cluster::{ClusterConfig, LocalCluster},
validator_configs::make_identical_validator_configs,
},
solana_measure::measure::Measure,
- solana_native_token::sol_to_lamports,
+ solana_native_token::LAMPORTS_PER_SOL,
solana_poh_config::PohConfig,
solana_program_pack::Pack,
solana_test_validator::TestValidator,
@@ -1428,15 +1413,7 @@ pub mod test {
};
fn initialize_and_add_secondary_indexes(validator_config: &mut ValidatorConfig) {
- if validator_config.accounts_db_config.is_none() {
- validator_config.accounts_db_config = Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS);
- }
-
- let account_indexes = &mut validator_config
- .accounts_db_config
- .as_mut()
- .unwrap()
- .account_indexes;
+ let account_indexes = &mut validator_config.accounts_db_config.account_indexes;
if account_indexes.is_none() {
*account_indexes = Some(AccountSecondaryIndexes::default());
}
@@ -1498,7 +1475,7 @@ pub mod test {
);
let post_txs = client.get_transaction_count().unwrap();
start.stop();
- info!("{} pre {} post {}", start, pre_txs, post_txs);
+ info!("{start} pre {pre_txs} post {post_txs}");
}
#[test]
@@ -1548,7 +1525,7 @@ pub mod test {
);
let post_txs = client.get_transaction_count().unwrap();
start.stop();
- info!("{} pre {} post {}", start, pre_txs, post_txs);
+ info!("{start} pre {pre_txs} post {post_txs}");
}
#[test]
@@ -1556,7 +1533,11 @@ pub mod test {
solana_logger::setup();
let mint_keypair = Keypair::new();
let mint_pubkey = mint_keypair.pubkey();
- let faucet_addr = run_local_faucet(mint_keypair, None);
+ let faucet_addr = run_local_faucet_for_tests(
+ mint_keypair,
+ None, /* per_time_cap */
+ 0, /* port */
+ );
let test_validator = TestValidator::with_custom_fees(
mint_pubkey,
1,
@@ -1572,11 +1553,7 @@ pub mod test {
let funder = Keypair::new();
let latest_blockhash = rpc_client.get_latest_blockhash().unwrap();
let signature = rpc_client
- .request_airdrop_with_blockhash(
- &funder.pubkey(),
- sol_to_lamports(1.0),
- &latest_blockhash,
- )
+ .request_airdrop_with_blockhash(&funder.pubkey(), LAMPORTS_PER_SOL, &latest_blockhash)
.unwrap();
rpc_client
.confirm_transaction_with_spinner(
@@ -1647,6 +1624,6 @@ pub mod test {
0,
);
start.stop();
- info!("{}", start);
+ info!("{start}");
}
}
diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml
index 46d56f733fcbac..f1eb57d3848327 100644
--- a/accounts-db/Cargo.toml
+++ b/accounts-db/Cargo.toml
@@ -25,7 +25,6 @@ dev-context-only-utils = [
"dep:solana-stake-program",
"dep:solana-vote-program",
"solana-account/dev-context-only-utils",
- "solana-pubkey/rand",
"solana-transaction/dev-context-only-utils",
]
frozen-abi = [
@@ -85,10 +84,9 @@ solana-measure = { workspace = true }
solana-message = { workspace = true }
solana-metrics = { workspace = true }
solana-nohash-hasher = { workspace = true }
-solana-pubkey = { workspace = true }
+solana-pubkey = { workspace = true, features = ["rand"] }
solana-rayon-threadlimit = { workspace = true }
solana-rent = { workspace = true, optional = true }
-solana-rent-collector = { workspace = true }
solana-reward-info = { workspace = true, features = ["serde"] }
solana-sha256-hasher = { workspace = true }
solana-signer = { workspace = true, optional = true }
@@ -128,6 +126,7 @@ solana-logger = { workspace = true }
solana-sdk-ids = { workspace = true }
solana-signature = { workspace = true, features = ["rand"] }
solana-slot-history = { workspace = true }
+solana-svm = { workspace = true }
static_assertions = { workspace = true }
strum = { workspace = true, features = ["derive"] }
strum_macros = { workspace = true }
diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs
index cfb0c89d15fdc6..f2012aafc22414 100644
--- a/accounts-db/benches/accounts.rs
+++ b/accounts-db/benches/accounts.rs
@@ -33,7 +33,7 @@ static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc;
fn new_accounts_db(account_paths: Vec) -> AccountsDb {
AccountsDb::new_with_config(
account_paths,
- Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS),
+ ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS,
None,
Arc::default(),
)
@@ -51,10 +51,10 @@ fn bench_delete_dependencies(bencher: &mut Bencher) {
let account = AccountSharedData::new(i + 1, 0, AccountSharedData::default().owner());
accounts
.accounts_db
- .store_for_tests(i, &[(&pubkey, &account)]);
+ .store_for_tests((i, [(&pubkey, &account)].as_slice()));
accounts
.accounts_db
- .store_for_tests(i, &[(&old_pubkey, &zero_account)]);
+ .store_for_tests((i, [(&old_pubkey, &zero_account)].as_slice()));
old_pubkey = pubkey;
accounts.accounts_db.add_root_and_flush_write_cache(i);
}
@@ -89,7 +89,7 @@ where
)
.collect();
let storable_accounts: Vec<_> = pubkeys.iter().zip(accounts_data.iter()).collect();
- accounts.store_accounts_cached((slot, storable_accounts.as_slice()));
+ accounts.store_accounts_par((slot, storable_accounts.as_slice()), None);
accounts.add_root(slot);
accounts
.accounts_db
@@ -116,7 +116,7 @@ where
// Write to a different slot than the one being read from. Because
// there's a new account pubkey being written to every time, will
// compete for the accounts index lock on every store
- accounts.store_accounts_cached((slot + 1, new_storable_accounts.as_slice()));
+ accounts.store_accounts_par((slot + 1, new_storable_accounts.as_slice()), None);
});
}
@@ -234,7 +234,7 @@ fn bench_dashmap_par_iter(bencher: &mut Bencher) {
let (accounts, dashmap) = setup_bench_dashmap_iter();
bencher.iter(|| {
- test::black_box(accounts.accounts_db.thread_pool.install(|| {
+ test::black_box(accounts.accounts_db.thread_pool_foreground.install(|| {
dashmap
.par_iter()
.map(|cached_account| (*cached_account.key(), cached_account.value().1))
@@ -268,7 +268,7 @@ fn bench_load_largest_accounts(b: &mut Bencher) {
let account = AccountSharedData::new(lamports, 0, &Pubkey::default());
accounts
.accounts_db
- .store_for_tests(0, &[(&pubkey, &account)]);
+ .store_for_tests((0, [(&pubkey, &account)].as_slice()));
}
accounts.accounts_db.add_root_and_flush_write_cache(0);
let ancestors = Ancestors::from(vec![0]);
diff --git a/accounts-db/benches/accounts_index.rs b/accounts-db/benches/accounts_index.rs
index 6786a251d3cee1..9ed8c09dad7547 100644
--- a/accounts-db/benches/accounts_index.rs
+++ b/accounts-db/benches/accounts_index.rs
@@ -8,7 +8,7 @@ use {
solana_accounts_db::{
account_info::AccountInfo,
accounts_index::{
- AccountSecondaryIndexes, AccountsIndex, UpsertReclaim,
+ AccountSecondaryIndexes, AccountsIndex, ReclaimsSlotList, UpsertReclaim,
ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS,
},
},
@@ -29,7 +29,7 @@ fn bench_accounts_index(bencher: &mut Bencher) {
const NUM_FORKS: u64 = 16;
- let mut reclaims = vec![];
+ let mut reclaims = ReclaimsSlotList::new();
let index = AccountsIndex::::new(
&ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS,
Arc::default(),
diff --git a/accounts-db/benches/append_vec.rs b/accounts-db/benches/append_vec.rs
index 4e2536b6ad836e..204c51e01db948 100644
--- a/accounts-db/benches/append_vec.rs
+++ b/accounts-db/benches/append_vec.rs
@@ -5,7 +5,7 @@ use {
rand::{thread_rng, Rng},
solana_account::{AccountSharedData, ReadableAccount},
solana_accounts_db::{
- accounts_file::StoredAccountsInfo,
+ accounts_file::{StorageAccess, StoredAccountsInfo},
append_vec::{
test_utils::{create_test_account, get_append_vec_path},
AppendVec, StoredMeta,
@@ -39,10 +39,9 @@ fn append_account(
vec.append_accounts(&storable_accounts, 0)
}
-#[bench]
-fn append_vec_append(bencher: &mut Bencher) {
+fn append_vec_append(bencher: &mut Bencher, storage_access: StorageAccess) {
let path = get_append_vec_path("bench_append");
- let vec = AppendVec::new(&path.path, true, 64 * 1024);
+ let vec = AppendVec::new(&path.path, true, 64 * 1024, storage_access);
bencher.iter(|| {
let (meta, account) = create_test_account(0);
if append_account(&vec, meta, &account).is_none() {
@@ -51,6 +50,16 @@ fn append_vec_append(bencher: &mut Bencher) {
});
}
+#[bench]
+fn append_vec_append_file(bencher: &mut Bencher) {
+ append_vec_append(bencher, StorageAccess::File);
+}
+
+#[bench]
+fn append_vec_append_mmap(bencher: &mut Bencher) {
+ append_vec_append(bencher, StorageAccess::Mmap);
+}
+
fn add_test_accounts(vec: &AppendVec, size: usize) -> Vec<(usize, usize)> {
(0..size)
.filter_map(|sample| {
@@ -60,10 +69,9 @@ fn add_test_accounts(vec: &AppendVec, size: usize) -> Vec<(usize, usize)> {
.collect()
}
-#[bench]
-fn append_vec_sequential_read(bencher: &mut Bencher) {
+fn append_vec_sequential_read(bencher: &mut Bencher, storage_access: StorageAccess) {
let path = get_append_vec_path("seq_read");
- let vec = AppendVec::new(&path.path, true, 64 * 1024);
+ let vec = AppendVec::new(&path.path, true, 64 * 1024, storage_access);
let size = 1_000;
let mut indexes = add_test_accounts(&vec, size);
bencher.iter(|| {
@@ -76,10 +84,20 @@ fn append_vec_sequential_read(bencher: &mut Bencher) {
});
});
}
+
+#[bench]
+fn append_vec_sequential_read_file(bencher: &mut Bencher) {
+ append_vec_sequential_read(bencher, StorageAccess::File);
+}
+
#[bench]
-fn append_vec_random_read(bencher: &mut Bencher) {
+fn append_vec_sequential_read_mmap(bencher: &mut Bencher) {
+ append_vec_sequential_read(bencher, StorageAccess::Mmap);
+}
+
+fn append_vec_random_read(bencher: &mut Bencher, storage_access: StorageAccess) {
let path = get_append_vec_path("random_read");
- let vec = AppendVec::new(&path.path, true, 64 * 1024);
+ let vec = AppendVec::new(&path.path, true, 64 * 1024, storage_access);
let size = 1_000;
let indexes = add_test_accounts(&vec, size);
bencher.iter(|| {
@@ -93,9 +111,23 @@ fn append_vec_random_read(bencher: &mut Bencher) {
}
#[bench]
-fn append_vec_concurrent_append_read(bencher: &mut Bencher) {
+fn append_vec_random_read_file(bencher: &mut Bencher) {
+ append_vec_random_read(bencher, StorageAccess::File);
+}
+
+#[bench]
+fn append_vec_random_read_mmap(bencher: &mut Bencher) {
+ append_vec_random_read(bencher, StorageAccess::Mmap);
+}
+
+fn append_vec_concurrent_append_read(bencher: &mut Bencher, storage_access: StorageAccess) {
let path = get_append_vec_path("concurrent_read");
- let vec = Arc::new(AppendVec::new(&path.path, true, 1024 * 1024));
+ let vec = Arc::new(AppendVec::new(
+ &path.path,
+ true,
+ 1024 * 1024,
+ storage_access,
+ ));
let vec1 = vec.clone();
let indexes: Arc>> = Arc::new(Mutex::new(vec![]));
let indexes1 = indexes.clone();
@@ -123,9 +155,23 @@ fn append_vec_concurrent_append_read(bencher: &mut Bencher) {
}
#[bench]
-fn append_vec_concurrent_read_append(bencher: &mut Bencher) {
+fn append_vec_concurrent_append_read_file(bencher: &mut Bencher) {
+ append_vec_concurrent_append_read(bencher, StorageAccess::File);
+}
+
+#[bench]
+fn append_vec_concurrent_append_read_mmap(bencher: &mut Bencher) {
+ append_vec_concurrent_append_read(bencher, StorageAccess::Mmap);
+}
+
+fn append_vec_concurrent_read_append(bencher: &mut Bencher, storage_access: StorageAccess) {
let path = get_append_vec_path("concurrent_read");
- let vec = Arc::new(AppendVec::new(&path.path, true, 1024 * 1024));
+ let vec = Arc::new(AppendVec::new(
+ &path.path,
+ true,
+ 1024 * 1024,
+ storage_access,
+ ));
let vec1 = vec.clone();
let indexes: Arc>> = Arc::new(Mutex::new(vec![]));
let indexes1 = indexes.clone();
@@ -134,8 +180,12 @@ fn append_vec_concurrent_read_append(bencher: &mut Bencher) {
if len == 0 {
continue;
}
- let random_index: usize = thread_rng().gen_range(0..len + 1);
- let (sample, pos) = *indexes1.lock().unwrap().get(random_index % len).unwrap();
+ let random_index: usize = thread_rng().gen_range(0..len.wrapping_add(1));
+ let (sample, pos) = *indexes1
+ .lock()
+ .unwrap()
+ .get(random_index.checked_rem(len).unwrap())
+ .unwrap();
vec1.get_stored_account_meta_callback(pos, |account| {
let (_meta, test) = create_test_account(sample);
assert_eq!(account.data(), test.data());
@@ -149,3 +199,13 @@ fn append_vec_concurrent_read_append(bencher: &mut Bencher) {
}
});
}
+
+#[bench]
+fn append_vec_concurrent_read_append_file(bencher: &mut Bencher) {
+ append_vec_concurrent_read_append(bencher, StorageAccess::File);
+}
+
+#[bench]
+fn append_vec_concurrent_read_append_mmap(bencher: &mut Bencher) {
+ append_vec_concurrent_read_append(bencher, StorageAccess::Mmap);
+}
diff --git a/accounts-db/benches/bench_accounts_file.rs b/accounts-db/benches/bench_accounts_file.rs
index 0e83e3e12c9db0..6a2068422ec86d 100644
--- a/accounts-db/benches/bench_accounts_file.rs
+++ b/accounts-db/benches/bench_accounts_file.rs
@@ -7,12 +7,11 @@ use {
append_vec::{self, AppendVec},
tiered_storage::{
file::TieredReadableFile,
- hot::{HotStorageReader, HotStorageWriter},
+ hot::{HotStorageReader, HotStorageWriter, RENT_EXEMPT_RENT_EPOCH},
},
},
solana_clock::Slot,
solana_pubkey::Pubkey,
- solana_rent_collector::RENT_EXEMPT_RENT_EPOCH,
solana_system_interface::MAX_PERMITTED_DATA_LENGTH,
std::mem::ManuallyDrop,
};
@@ -30,8 +29,8 @@ const ACCOUNTS_COUNTS: [usize; 4] = [
10_000, // reasonable largest number of accounts written per slot
];
-fn bench_write_accounts_file(c: &mut Criterion) {
- let mut group = c.benchmark_group("write_accounts_file");
+fn bench_write_accounts_file(c: &mut Criterion, storage_access: StorageAccess) {
+ let mut group = c.benchmark_group(format!("write_accounts_file_{storage_access:?}"));
// most accounts on mnb are 165-200 bytes, so use that here too
let space = 200;
@@ -65,7 +64,7 @@ fn bench_write_accounts_file(c: &mut Criterion) {
|| {
let path = temp_dir.path().join(format!("append_vec_{accounts_count}"));
let file_size = accounts.len() * (space + append_vec::STORE_META_OVERHEAD);
- AppendVec::new(path, true, file_size)
+ AppendVec::new(path, true, file_size, storage_access)
},
|append_vec| {
let res = append_vec.append_accounts(&storable_accounts, 0).unwrap();
@@ -99,6 +98,14 @@ fn bench_write_accounts_file(c: &mut Criterion) {
}
}
+fn bench_write_accounts_file_file_io(c: &mut Criterion) {
+ bench_write_accounts_file(c, StorageAccess::File);
+}
+
+fn bench_write_accounts_file_mmap(c: &mut Criterion) {
+ bench_write_accounts_file(c, StorageAccess::Mmap);
+}
+
fn bench_scan_pubkeys(c: &mut Criterion) {
let mut group = c.benchmark_group("scan_pubkeys");
let temp_dir = tempfile::tempdir().unwrap();
@@ -126,7 +133,7 @@ fn bench_scan_pubkeys(c: &mut Criterion) {
.iter()
.map(|(_, account)| append_vec::aligned_stored_size(account.data().len()))
.sum();
- let append_vec = AppendVec::new(append_vec_path, true, file_size);
+ let append_vec = AppendVec::new(append_vec_path, true, file_size, StorageAccess::File);
let stored_accounts_info = append_vec
.append_accounts(&(Slot::MAX, storable_accounts.as_slice()), 0)
.unwrap();
@@ -211,7 +218,7 @@ fn bench_get_account_shared_data(c: &mut Criterion) {
.iter()
.map(|(_, account)| append_vec::aligned_stored_size(account.data().len()))
.sum();
- let append_vec = AppendVec::new(append_vec_path, true, file_size);
+ let append_vec = AppendVec::new(append_vec_path, true, file_size, StorageAccess::File);
let stored_accounts_info = append_vec
.append_accounts(&(Slot::MAX, storable_accounts.as_slice()), 0)
.unwrap();
@@ -283,7 +290,8 @@ fn bench_get_account_shared_data(c: &mut Criterion) {
criterion_group!(
benches,
- bench_write_accounts_file,
+ bench_write_accounts_file_file_io,
+ bench_write_accounts_file_mmap,
bench_scan_pubkeys,
bench_get_account_shared_data,
);
diff --git a/accounts-db/benches/bench_hashing.rs b/accounts-db/benches/bench_hashing.rs
index 54958715d66507..a339235d2879e7 100644
--- a/accounts-db/benches/bench_hashing.rs
+++ b/accounts-db/benches/bench_hashing.rs
@@ -26,7 +26,7 @@ const DATA_SIZES: [usize; 6] = [
///
/// Ensure this constant stays in sync with the value of `META_SIZE` in
/// AccountsDb::hash_account_helper().
-const META_SIZE: usize = 81;
+const META_SIZE: usize = 73;
fn bench_hash_account(c: &mut Criterion) {
let lamports = 123_456_789;
diff --git a/accounts-db/benches/bench_lock_accounts.rs b/accounts-db/benches/bench_lock_accounts.rs
index 620b6939e0cbc8..42d11e4875704f 100644
--- a/accounts-db/benches/bench_lock_accounts.rs
+++ b/accounts-db/benches/bench_lock_accounts.rs
@@ -38,7 +38,7 @@ fn create_test_transactions(lock_count: usize, read_conflicts: bool) -> Vec = utils::accounts_with_size_limit(
255,
diff --git a/accounts-db/benches/utils.rs b/accounts-db/benches/utils.rs
index a76c6c00a25496..9b8dfa8cbfc5da 100644
--- a/accounts-db/benches/utils.rs
+++ b/accounts-db/benches/utils.rs
@@ -9,9 +9,9 @@ use {
},
rand_chacha::ChaChaRng,
solana_account::AccountSharedData,
+ solana_accounts_db::tiered_storage::hot::RENT_EXEMPT_RENT_EPOCH,
solana_pubkey::Pubkey,
solana_rent::Rent,
- solana_rent_collector::RENT_EXEMPT_RENT_EPOCH,
std::iter,
};
diff --git a/accounts-db/src/account_info.rs b/accounts-db/src/account_info.rs
index 9cf8e771edd9e5..995cab01143316 100644
--- a/accounts-db/src/account_info.rs
+++ b/accounts-db/src/account_info.rs
@@ -4,8 +4,10 @@
//! Note that AccountInfo is saved to disk buckets during runtime, but disk buckets are recreated at startup.
use {
crate::{
- accounts_db::AccountsFileId, accounts_file::ALIGN_BOUNDARY_OFFSET,
- accounts_index::IsCached, is_zero_lamport::IsZeroLamport,
+ accounts_db::AccountsFileId,
+ accounts_file::ALIGN_BOUNDARY_OFFSET,
+ accounts_index::{DiskIndexValue, IndexValue, IsCached},
+ is_zero_lamport::IsZeroLamport,
},
modular_bitfield::prelude::*,
};
@@ -13,10 +15,6 @@ use {
/// offset within an append vec to account data
pub type Offset = usize;
-/// bytes used to store this account in append vec
-/// Note this max needs to be big enough to handle max data len of 10MB, which is a const
-pub type StoredSize = u32;
-
/// specify where account data is located
#[derive(Debug, PartialEq, Eq)]
pub enum StorageLocation {
@@ -103,6 +101,10 @@ impl IsCached for AccountInfo {
}
}
+impl IndexValue for AccountInfo {}
+
+impl DiskIndexValue for AccountInfo {}
+
impl IsCached for StorageLocation {
fn is_cached(&self) -> bool {
matches!(self, StorageLocation::Cached)
@@ -168,6 +170,7 @@ impl AccountInfo {
}
}
}
+
#[cfg(test)]
mod test {
use {super::*, crate::append_vec::MAXIMUM_APPEND_VEC_FILE_SIZE};
diff --git a/accounts-db/src/account_storage.rs b/accounts-db/src/account_storage.rs
index c48ca81b7ccba6..324583f5cde70a 100644
--- a/accounts-db/src/account_storage.rs
+++ b/accounts-db/src/account_storage.rs
@@ -8,8 +8,11 @@ use {
solana_clock::Slot,
solana_nohash_hasher::{BuildNoHashHasher, IntMap},
std::{
- ops::Range,
- sync::{Arc, RwLock},
+ ops::{Index, Range},
+ sync::{
+ atomic::{AtomicUsize, Ordering},
+ Arc, RwLock,
+ },
},
};
@@ -83,6 +86,14 @@ impl AccountStorage {
self.get_slot_storage_entry_shrinking_in_progress_ok(slot)
}
+ pub(super) fn all_storages(&self) -> Vec> {
+ assert!(self.no_shrink_in_progress());
+ self.map
+ .iter()
+ .map(|item| Arc::clone(item.value()))
+ .collect()
+ }
+
pub(crate) fn replace_storage_with_equivalent(
&self,
slot: Slot,
@@ -284,20 +295,6 @@ impl ShrinkInProgress<'_> {
}
}
-#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))]
-#[derive(Debug, Eq, PartialEq, Copy, Clone, Deserialize, Serialize)]
-pub enum AccountStorageStatus {
- Available = 0,
- Full = 1,
- Candidate = 2,
-}
-
-impl Default for AccountStorageStatus {
- fn default() -> Self {
- Self::Available
- }
-}
-
/// Wrapper over slice of `Arc` that provides an ordered access to storages.
///
/// A few strategies are available for ordering storages:
@@ -339,6 +336,19 @@ impl<'a> AccountStoragesOrderer<'a> {
}
}
+ pub fn entries_len(&self) -> usize {
+ self.indices.len()
+ }
+
+ /// Returns the original index, into the storages slice, at `position`
+ ///
+ /// # Panics
+ ///
+ /// Caller must ensure `position` is in range, else will panic.
+ pub fn original_index(&'a self, position: usize) -> usize {
+ self.indices[position]
+ }
+
pub fn iter(&'a self) -> impl ExactSizeIterator- + 'a {
self.indices.iter().map(|i| self.storages[*i].as_ref())
}
@@ -346,6 +356,71 @@ impl<'a> AccountStoragesOrderer<'a> {
pub fn par_iter(&'a self) -> impl IndexedParallelIterator
- + 'a {
self.indices.par_iter().map(|i| self.storages[*i].as_ref())
}
+
+ pub fn into_concurrent_consumer(self) -> AccountStoragesConcurrentConsumer<'a> {
+ AccountStoragesConcurrentConsumer::new(self)
+ }
+}
+
+impl Index for AccountStoragesOrderer<'_> {
+ type Output = AccountStorageEntry;
+
+ fn index(&self, position: usize) -> &Self::Output {
+ // SAFETY: Caller must ensure `position` is in range.
+ let original_index = self.original_index(position);
+ // SAFETY: `original_index` must be valid here, so it is a valid index into `storages`.
+ self.storages[original_index].as_ref()
+ }
+}
+
+/// A thread-safe, lock-free iterator for consuming `AccountStorageEntry` values
+/// from an `AccountStoragesOrderer` across multiple threads.
+///
+/// Unlike standard iterators, `AccountStoragesConcurrentConsumer`:
+/// - Is **shared** between threads via references (`&self`), not moved.
+/// - Allows safe, parallel consumption where each item is yielded at most once.
+/// - Does **not** implement `Iterator` because it must take `&self` instead of `&mut self`.
+pub struct AccountStoragesConcurrentConsumer<'a> {
+ orderer: AccountStoragesOrderer<'a>,
+ current_position: AtomicUsize,
+}
+
+impl<'a> AccountStoragesConcurrentConsumer<'a> {
+ pub fn new(orderer: AccountStoragesOrderer<'a>) -> Self {
+ Self {
+ orderer,
+ current_position: AtomicUsize::new(0),
+ }
+ }
+
+ /// Takes the next `AccountStorageEntry` moving shared consume position
+ /// until the end of the entries source is reached.
+ pub fn next(&'a self) -> Option> {
+ let position = self.current_position.fetch_add(1, Ordering::Relaxed);
+ if position < self.orderer.entries_len() {
+ // SAFETY: We have ensured `position` is in range.
+ let original_index = self.orderer.original_index(position);
+ let storage = &self.orderer[position];
+ Some(NextItem {
+ position,
+ original_index,
+ storage,
+ })
+ } else {
+ None
+ }
+ }
+}
+
+/// Value returned from calling `AccountStoragesConcurrentConsumer::next()`
+#[derive(Debug)]
+pub struct NextItem<'a> {
+ /// The position through the orderer for this call to `next()`
+ pub position: usize,
+ /// The index into the original storages slice at this position
+ pub original_index: usize,
+ /// The storage itself
+ pub storage: &'a AccountStorageEntry,
}
/// Select the `nth` (`0 <= nth < range.len()`) value from a `range`, choosing values alternately
@@ -377,12 +452,14 @@ fn select_from_range_with_start_end_rates(
pub(crate) mod tests {
use {
super::*,
- crate::accounts_file::AccountsFileProvider,
+ crate::accounts_file::{AccountsFileProvider, StorageAccess},
std::{iter, path::Path},
+ test_case::test_case,
};
- #[test]
- fn test_shrink_in_progress() {
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
+ fn test_shrink_in_progress(storage_access: StorageAccess) {
// test that we check in order map then shrink_in_progress_map
let storage = AccountStorage::default();
let slot = 0;
@@ -401,6 +478,7 @@ pub(crate) mod tests {
id,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
let entry2 = Arc::new(AccountStorageEntry::new(
common_store_path,
@@ -408,6 +486,7 @@ pub(crate) mod tests {
id,
store_file_size2,
AccountsFileProvider::AppendVec,
+ storage_access,
));
storage.map.insert(slot, entry);
@@ -450,7 +529,11 @@ pub(crate) mod tests {
}
impl AccountStorage {
- fn get_test_storage_with_id(&self, id: AccountsFileId) -> Arc {
+ fn get_test_storage_with_id(
+ &self,
+ id: AccountsFileId,
+ storage_access: StorageAccess,
+ ) -> Arc {
let slot = 0;
// add a map store
let common_store_path = Path::new("");
@@ -461,80 +544,87 @@ pub(crate) mod tests {
id,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
))
}
- fn get_test_storage(&self) -> Arc {
- self.get_test_storage_with_id(0)
+ fn get_test_storage(&self, storage_access: StorageAccess) -> Arc {
+ self.get_test_storage_with_id(0, storage_access)
}
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "self.no_shrink_in_progress()")]
- fn test_get_slot_storage_entry_fail() {
+ fn test_get_slot_storage_entry_fail(storage_access: StorageAccess) {
let storage = AccountStorage::default();
storage
.shrink_in_progress_map
.write()
.unwrap()
- .insert(0, storage.get_test_storage());
+ .insert(0, storage.get_test_storage(storage_access));
storage.get_slot_storage_entry(0);
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "self.no_shrink_in_progress()")]
- fn test_all_slots_fail() {
+ fn test_all_slots_fail(storage_access: StorageAccess) {
let storage = AccountStorage::default();
storage
.shrink_in_progress_map
.write()
.unwrap()
- .insert(0, storage.get_test_storage());
+ .insert(0, storage.get_test_storage(storage_access));
storage.all_slots();
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "self.no_shrink_in_progress()")]
- fn test_initialize_fail() {
+ fn test_initialize_fail(storage_access: StorageAccess) {
let mut storage = AccountStorage::default();
storage
.shrink_in_progress_map
.write()
.unwrap()
- .insert(0, storage.get_test_storage());
+ .insert(0, storage.get_test_storage(storage_access));
storage.initialize(AccountStorageMap::default());
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(
expected = "shrink_can_be_active || self.shrink_in_progress_map.read().unwrap().is_empty()"
)]
- fn test_remove_fail() {
+ fn test_remove_fail(storage_access: StorageAccess) {
let storage = AccountStorage::default();
storage
.shrink_in_progress_map
.write()
.unwrap()
- .insert(0, storage.get_test_storage());
+ .insert(0, storage.get_test_storage(storage_access));
storage.remove(&0, false);
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "self.no_shrink_in_progress()")]
- fn test_iter_fail() {
+ fn test_iter_fail(storage_access: StorageAccess) {
let storage = AccountStorage::default();
storage
.shrink_in_progress_map
.write()
.unwrap()
- .insert(0, storage.get_test_storage());
+ .insert(0, storage.get_test_storage(storage_access));
storage.iter();
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "self.no_shrink_in_progress()")]
- fn test_insert_fail() {
+ fn test_insert_fail(storage_access: StorageAccess) {
let storage = AccountStorage::default();
- let sample = storage.get_test_storage();
+ let sample = storage.get_test_storage(storage_access);
storage
.shrink_in_progress_map
.write()
@@ -543,12 +633,13 @@ pub(crate) mod tests {
storage.insert(0, sample);
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "duplicate call")]
- fn test_shrinking_in_progress_fail3() {
+ fn test_shrinking_in_progress_fail3(storage_access: StorageAccess) {
// already entry in shrink_in_progress_map
let storage = AccountStorage::default();
- let sample = storage.get_test_storage();
+ let sample = storage.get_test_storage(storage_access);
storage.map.insert(0, sample.clone());
storage
.shrink_in_progress_map
@@ -558,28 +649,30 @@ pub(crate) mod tests {
storage.shrinking_in_progress(0, sample);
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "duplicate call")]
- fn test_shrinking_in_progress_fail4() {
+ fn test_shrinking_in_progress_fail4(storage_access: StorageAccess) {
// already called 'shrink_in_progress' on this slot and it is still active
let storage = AccountStorage::default();
- let sample_to_shrink = storage.get_test_storage();
- let sample = storage.get_test_storage();
+ let sample_to_shrink = storage.get_test_storage(storage_access);
+ let sample = storage.get_test_storage(storage_access);
storage.map.insert(0, sample_to_shrink);
let _shrinking_in_progress = storage.shrinking_in_progress(0, sample.clone());
storage.shrinking_in_progress(0, sample);
}
- #[test]
- fn test_shrinking_in_progress_second_call() {
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
+ fn test_shrinking_in_progress_second_call(storage_access: StorageAccess) {
// already called 'shrink_in_progress' on this slot, but it finished, so we succeed
// verify data structures during and after shrink and then with subsequent shrink call
let storage = AccountStorage::default();
let slot = 0;
let id_to_shrink = 1;
let id_shrunk = 0;
- let sample_to_shrink = storage.get_test_storage_with_id(id_to_shrink);
- let sample = storage.get_test_storage();
+ let sample_to_shrink = storage.get_test_storage_with_id(id_to_shrink, storage_access);
+ let sample = storage.get_test_storage(storage_access);
storage.map.insert(slot, sample_to_shrink);
let shrinking_in_progress = storage.shrinking_in_progress(slot, sample.clone());
assert!(storage.map.contains_key(&slot));
@@ -602,30 +695,33 @@ pub(crate) mod tests {
storage.shrinking_in_progress(slot, sample);
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "no pre-existing storage for shrinking slot")]
- fn test_shrinking_in_progress_fail1() {
+ fn test_shrinking_in_progress_fail1(storage_access: StorageAccess) {
// nothing in slot currently
let storage = AccountStorage::default();
- let sample = storage.get_test_storage();
+ let sample = storage.get_test_storage(storage_access);
storage.shrinking_in_progress(0, sample);
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "no pre-existing storage for shrinking slot")]
- fn test_shrinking_in_progress_fail2() {
+ fn test_shrinking_in_progress_fail2(storage_access: StorageAccess) {
// nothing in slot currently, but there is an empty map entry
let storage = AccountStorage::default();
- let sample = storage.get_test_storage();
+ let sample = storage.get_test_storage(storage_access);
storage.shrinking_in_progress(0, sample);
}
- #[test]
- fn test_missing() {
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
+ fn test_missing(storage_access: StorageAccess) {
// already called 'shrink_in_progress' on this slot, but it finished, so we succeed
// verify data structures during and after shrink and then with subsequent shrink call
let storage = AccountStorage::default();
- let sample = storage.get_test_storage();
+ let sample = storage.get_test_storage(storage_access);
let id = sample.id();
let missing_id = 9999;
let slot = sample.slot();
@@ -659,8 +755,9 @@ pub(crate) mod tests {
assert!(storage.get_account_storage_entry(slot, id).is_some());
}
- #[test]
- fn test_get_if() {
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
+ fn test_get_if(storage_access: StorageAccess) {
let storage = AccountStorage::default();
assert!(storage.get_if(|_, _| true).is_empty());
@@ -674,6 +771,7 @@ pub(crate) mod tests {
id,
5000,
AccountsFileProvider::AppendVec,
+ storage_access,
);
storage.map.insert(slot, entry.into());
}
@@ -691,15 +789,16 @@ pub(crate) mod tests {
assert_eq!(storage.get_if(|_, _| true).len(), ids.len());
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "self.no_shrink_in_progress()")]
- fn test_get_if_fail() {
+ fn test_get_if_fail(storage_access: StorageAccess) {
let storage = AccountStorage::default();
storage
.shrink_in_progress_map
.write()
.unwrap()
- .insert(0, storage.get_test_storage());
+ .insert(0, storage.get_test_storage(storage_access));
storage.get_if(|_, _| true);
}
diff --git a/accounts-db/src/account_storage/stored_account_info.rs b/accounts-db/src/account_storage/stored_account_info.rs
index faabf619391676..b31bf64f2c8130 100644
--- a/accounts-db/src/account_storage/stored_account_info.rs
+++ b/accounts-db/src/account_storage/stored_account_info.rs
@@ -46,6 +46,12 @@ impl<'storage> StoredAccountInfo<'storage> {
}
}
+impl IsZeroLamport for StoredAccountInfo<'_> {
+ fn is_zero_lamport(&self) -> bool {
+ self.lamports == 0
+ }
+}
+
impl ReadableAccount for StoredAccountInfo<'_> {
fn lamports(&self) -> u64 {
self.lamports
diff --git a/accounts-db/src/account_storage_reader.rs b/accounts-db/src/account_storage_reader.rs
index d42c630c46588e..f5aa204f2caa7a 100644
--- a/accounts-db/src/account_storage_reader.rs
+++ b/accounts-db/src/account_storage_reader.rs
@@ -32,7 +32,7 @@ impl<'a> AccountStorageReader<'a> {
let mut sorted_obsolete_accounts = storage.get_obsolete_accounts(snapshot_slot);
- // Tiered storage is not compatible with obsolete acocunts at this time
+ // Tiered storage is not compatible with obsolete accounts at this time
if matches!(storage.accounts, AccountsFile::TieredStorage(_)) {
assert!(
sorted_obsolete_accounts.is_empty(),
@@ -147,21 +147,25 @@ mod tests {
fn create_storage_for_storage_reader(
slot: Slot,
provider: AccountsFileProvider,
+ storage_access: StorageAccess,
) -> (AccountStorageEntry, Vec) {
let id = 0;
let (temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let file_size = 1024 * 1024;
(
- AccountStorageEntry::new(&paths[0], slot, id, file_size, provider),
+ AccountStorageEntry::new(&paths[0], slot, id, file_size, provider, storage_access),
temp_dirs,
)
}
- #[test]
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
#[should_panic(expected = "Obsolete accounts should be empty for TieredStorage")]
- fn test_account_storage_reader_tiered_storage_one_obsolete_account_should_panic() {
+ fn test_account_storage_reader_tiered_storage_one_obsolete_account_should_panic(
+ storage_access: StorageAccess,
+ ) {
let (storage, _temp_dirs) =
- create_storage_for_storage_reader(0, AccountsFileProvider::HotStorage);
+ create_storage_for_storage_reader(0, AccountsFileProvider::HotStorage, storage_access);
let account = AccountSharedData::new(1, 10, &Pubkey::new_unique());
let account2 = AccountSharedData::new(1, 10, &Pubkey::new_unique());
@@ -182,10 +186,14 @@ mod tests {
_ = AccountStorageReader::new(&storage, None).unwrap();
}
- #[test_case(AccountsFileProvider::AppendVec)]
- #[test_case(AccountsFileProvider::HotStorage)]
- fn test_account_storage_reader_no_obsolete_accounts(provider: AccountsFileProvider) {
- let (storage, _temp_dirs) = create_storage_for_storage_reader(0, provider);
+ #[test_case(AccountsFileProvider::AppendVec, StorageAccess::Mmap)]
+ #[test_case(AccountsFileProvider::AppendVec, StorageAccess::File)]
+ #[test_case(AccountsFileProvider::HotStorage, StorageAccess::File)]
+ fn test_account_storage_reader_no_obsolete_accounts(
+ provider: AccountsFileProvider,
+ storage_access: StorageAccess,
+ ) {
+ let (storage, _temp_dirs) = create_storage_for_storage_reader(0, provider, storage_access);
let account = AccountSharedData::new(1, 10, &Pubkey::default());
let account2 = AccountSharedData::new(1, 10, &Pubkey::default());
@@ -221,7 +229,7 @@ mod tests {
) {
solana_logger::setup();
let (storage, _temp_dirs) =
- create_storage_for_storage_reader(0, AccountsFileProvider::AppendVec);
+ create_storage_for_storage_reader(0, AccountsFileProvider::AppendVec, storage_access);
let slot = 0;
@@ -318,10 +326,11 @@ mod tests {
}
}
- #[test]
- fn test_account_storage_reader_filter_by_slot() {
+ #[test_case(StorageAccess::Mmap)]
+ #[test_case(StorageAccess::File)]
+ fn test_account_storage_reader_filter_by_slot(storage_access: StorageAccess) {
let (storage, _temp_dirs) =
- create_storage_for_storage_reader(10, AccountsFileProvider::AppendVec);
+ create_storage_for_storage_reader(10, AccountsFileProvider::AppendVec, storage_access);
let total_accounts = 30;
let slot = 0;
diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs
index 0b9022e4028873..32f28d971f3a13 100644
--- a/accounts-db/src/accounts.rs
+++ b/accounts-db/src/accounts.rs
@@ -4,7 +4,7 @@ use {
account_storage::stored_account_info::StoredAccountInfo,
accounts_db::{
AccountsAddRootTiming, AccountsDb, LoadHint, LoadedAccount, ScanAccountStorageData,
- ScanStorageResult,
+ ScanStorageResult, UpdateIndexThreadSelection,
},
accounts_index::{IndexKey, ScanConfig, ScanError, ScanOrder, ScanResult},
ancestors::Ancestors,
@@ -24,7 +24,7 @@ use {
message_address_table_lookup::SVMMessageAddressTableLookup, svm_message::SVMMessage,
},
solana_transaction::sanitized::SanitizedTransaction,
- solana_transaction_context::TransactionAccount,
+ solana_transaction_context::transaction_accounts::TransactionAccount,
solana_transaction_error::TransactionResult as Result,
std::{
cmp::Reverse,
@@ -546,18 +546,36 @@ impl Accounts {
}
}
- /// Store the accounts into the DB
- pub fn store_cached<'a>(
+ /// Store `accounts` into the DB
+ ///
+ /// This version updates the accounts index sequentially,
+ /// using the same thread that calls the fn itself.
+ pub fn store_accounts_seq<'a>(
&self,
accounts: impl StorableAccounts<'a>,
transactions: Option<&'a [&'a SanitizedTransaction]>,
) {
- self.accounts_db
- .store_cached_inline_update_index(accounts, transactions);
+ self.accounts_db.store_accounts_unfrozen(
+ accounts,
+ transactions,
+ UpdateIndexThreadSelection::Inline,
+ );
}
- pub fn store_accounts_cached<'a>(&self, accounts: impl StorableAccounts<'a>) {
- self.accounts_db.store_cached(accounts)
+ /// Store `accounts` into the DB
+ ///
+ /// This version updates the accounts index in parallel,
+ /// using the foreground AccountsDb thread pool.
+ pub fn store_accounts_par<'a>(
+ &self,
+ accounts: impl StorableAccounts<'a>,
+ transactions: Option<&'a [&'a SanitizedTransaction]>,
+ ) {
+ self.accounts_db.store_accounts_unfrozen(
+ accounts,
+ transactions,
+ UpdateIndexThreadSelection::PoolWithThreshold,
+ );
}
/// Add a slot to root. Root slots cannot be purged
@@ -1120,7 +1138,8 @@ mod tests {
impl Accounts {
pub fn store_for_tests(&self, slot: Slot, pubkey: &Pubkey, account: &AccountSharedData) {
- self.accounts_db.store_for_tests(slot, &[(pubkey, account)])
+ self.accounts_db
+ .store_for_tests((slot, [(pubkey, account)].as_slice()))
}
/// useful to adapt tests written prior to introduction of the write cache
@@ -1225,13 +1244,14 @@ mod tests {
let pubkey = Pubkey::new_unique();
let account_data = AccountSharedData::new(1, 0, &Pubkey::default());
let accounts_db = Arc::new(AccountsDb::new_single_for_tests());
- accounts_db.store_for_tests(
+ accounts_db.store_for_tests((
0,
- &[
+ [
(&Pubkey::default(), &account_data),
(&pubkey, &account_data),
- ],
- );
+ ]
+ .as_slice(),
+ ));
let r_tx = sanitized_tx_from_metas(vec![AccountMeta {
pubkey,
@@ -1339,7 +1359,7 @@ mod tests {
/* This test assumes pubkey0 < pubkey1 < pubkey2.
* But the keys created with new_unique() does not guarantee this
* order because of the endianness. new_unique() calls add 1 at each
- * key generaration as the little endian integer. A pubkey stores its
+ * key generation as the little endian integer. A pubkey stores its
* value in a 32-byte array bytes, and its eq-partial trait considers
* the lower-address bytes more significant, which is the big-endian
* order.
diff --git a/accounts-db/src/accounts_cache.rs b/accounts-db/src/accounts_cache.rs
index d8f5cacebef1f0..0b42e2c3e73419 100644
--- a/accounts-db/src/accounts_cache.rs
+++ b/accounts-db/src/accounts_cache.rs
@@ -1,10 +1,9 @@
use {
- ahash::RandomState as AHashRandomState,
dashmap::DashMap,
solana_account::{AccountSharedData, ReadableAccount},
solana_clock::Slot,
solana_nohash_hasher::BuildNoHashHasher,
- solana_pubkey::Pubkey,
+ solana_pubkey::{Pubkey, PubkeyHasherBuilder},
std::{
collections::BTreeSet,
ops::Deref,
@@ -17,7 +16,7 @@ use {
#[derive(Debug)]
pub struct SlotCache {
- cache: DashMap, AHashRandomState>,
+ cache: DashMap, PubkeyHasherBuilder>,
same_account_writes: AtomicU64,
same_account_writes_size: AtomicU64,
unique_account_writes_size: AtomicU64,
@@ -128,7 +127,7 @@ impl SlotCache {
}
impl Deref for SlotCache {
- type Target = DashMap, AHashRandomState>;
+ type Target = DashMap, PubkeyHasherBuilder>;
fn deref(&self) -> &Self::Target {
&self.cache
}
diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 902c093606b204..36fcd723f90a83 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -31,29 +31,27 @@ use {
account_info::{AccountInfo, Offset, StorageLocation},
account_storage::{
stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData},
- AccountStorage, AccountStorageStatus, AccountStoragesOrderer, ShrinkInProgress,
+ AccountStorage, AccountStoragesOrderer, ShrinkInProgress,
},
accounts_cache::{AccountsCache, CachedAccount, SlotCache},
accounts_db::stats::{
AccountsStats, CleanAccountsStats, FlushStats, ObsoleteAccountsStats, PurgeStats,
ShrinkAncientStats, ShrinkStats, ShrinkStatsSub, StoreAccountsTiming,
},
- accounts_file::{
- AccountsFile, AccountsFileError, AccountsFileProvider, MatchAccountOwnerError,
- StorageAccess,
- },
+ accounts_file::{AccountsFile, AccountsFileError, AccountsFileProvider, StorageAccess},
accounts_hash::{AccountLtHash, AccountsLtHash, ZERO_LAMPORT_ACCOUNT_LT_HASH},
accounts_index::{
in_mem_accounts_index::StartupStats, AccountSecondaryIndexes, AccountsIndex,
- AccountsIndexConfig, AccountsIndexRootsStats, AccountsIndexScanResult, DiskIndexValue,
- IndexKey, IndexValue, IsCached, RefCount, ScanConfig, ScanFilter, ScanResult, SlotList,
+ AccountsIndexConfig, AccountsIndexRootsStats, AccountsIndexScanResult, IndexKey,
+ IsCached, ReclaimsSlotList, RefCount, ScanConfig, ScanFilter, ScanResult, SlotList,
UpsertReclaim, ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS, ACCOUNTS_INDEX_CONFIG_FOR_TESTING,
},
accounts_index_storage::Startup,
- accounts_update_notifier_interface::AccountsUpdateNotifier,
+ accounts_update_notifier_interface::{AccountForGeyser, AccountsUpdateNotifier},
active_stats::{ActiveStatItem, ActiveStats},
ancestors::Ancestors,
- append_vec::{aligned_stored_size, IndexInfo, IndexInfoInner, STORE_META_OVERHEAD},
+ append_vec::{self, aligned_stored_size, STORE_META_OVERHEAD},
+ buffered_reader::RequiredLenBufFileRead,
contains::Contains,
is_zero_lamport::IsZeroLamport,
partitioned_rewards::{
@@ -62,7 +60,6 @@ use {
read_only_accounts_cache::ReadOnlyAccountsCache,
storable_accounts::{StorableAccounts, StorableAccountsBySlot},
u64_align, utils,
- verify_accounts_hash_in_background::VerifyAccountsHashInBackground,
},
dashmap::{DashMap, DashSet},
log::*,
@@ -74,7 +71,7 @@ use {
solana_clock::{BankId, Epoch, Slot},
solana_epoch_schedule::EpochSchedule,
solana_lattice_hash::lt_hash::LtHash,
- solana_measure::{meas_dur, measure::Measure, measure_us},
+ solana_measure::{measure::Measure, measure_us},
solana_nohash_hasher::{BuildNoHashHasher, IntMap, IntSet},
solana_pubkey::Pubkey,
solana_rayon_threadlimit::get_thread_count,
@@ -91,7 +88,7 @@ use {
atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering},
Arc, Condvar, Mutex, RwLock,
},
- thread::sleep,
+ thread::{self, sleep},
time::{Duration, Instant},
},
tempfile::TempDir,
@@ -107,6 +104,13 @@ const UNREF_ACCOUNTS_BATCH_SIZE: usize = 10_000;
const DEFAULT_FILE_SIZE: u64 = 4 * 1024 * 1024;
const DEFAULT_NUM_DIRS: u32 = 4;
+// This value reflects the recommended memory lock limit documented in the validator's
+// setup instructions at docs/src/operations/guides/validator-start.md allowing use of
+// several io_uring instances with fixed buffers for large disk IO operations.
+pub const DEFAULT_MEMLOCK_BUDGET_SIZE: usize = 2_000_000_000;
+// Linux distributions often have some small memory lock limit (e.g. 8MB) that we can tap into.
+const MEMLOCK_BUDGET_SIZE_FOR_TESTS: usize = 4_000_000;
+
// When getting accounts for shrinking from the index, this is the # of accounts to lookup per thread.
// This allows us to split up accounts index accesses across multiple threads.
const SHRINK_COLLECT_CHUNK_SIZE: usize = 50;
@@ -153,7 +157,7 @@ pub(crate) trait ShrinkCollectRefs<'a>: Sync + Send {
fn collect(&mut self, other: Self);
fn add(
&mut self,
- ref_count: u64,
+ ref_count: RefCount,
account: &'a AccountFromStorage,
slot_list: &[(Slot, AccountInfo)],
);
@@ -176,7 +180,7 @@ impl<'a> ShrinkCollectRefs<'a> for AliveAccounts<'a> {
}
fn add(
&mut self,
- _ref_count: u64,
+ _ref_count: RefCount,
account: &'a AccountFromStorage,
_slot_list: &[(Slot, AccountInfo)],
) {
@@ -210,7 +214,7 @@ impl<'a> ShrinkCollectRefs<'a> for ShrinkCollectAliveSeparatedByRefs<'a> {
}
fn add(
&mut self,
- ref_count: u64,
+ ref_count: RefCount,
account: &'a AccountFromStorage,
slot_list: &[(Slot, AccountInfo)],
) {
@@ -299,10 +303,10 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig {
partitioned_epoch_rewards_config: DEFAULT_PARTITIONED_EPOCH_REWARDS_CONFIG,
storage_access: StorageAccess::File,
scan_filter_for_shrinking: ScanFilter::OnlyAbnormalTest,
- mark_obsolete_accounts: false,
- num_clean_threads: None,
+ mark_obsolete_accounts: MarkObsoleteAccounts::Disabled,
+ num_background_threads: None,
num_foreground_threads: None,
- num_hash_threads: None,
+ memlock_budget_size: MEMLOCK_BUDGET_SIZE_FOR_TESTS,
};
pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig {
index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS),
@@ -321,10 +325,10 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig
partitioned_epoch_rewards_config: DEFAULT_PARTITIONED_EPOCH_REWARDS_CONFIG,
storage_access: StorageAccess::File,
scan_filter_for_shrinking: ScanFilter::OnlyAbnormal,
- mark_obsolete_accounts: false,
- num_clean_threads: None,
+ mark_obsolete_accounts: MarkObsoleteAccounts::Disabled,
+ num_background_threads: None,
num_foreground_threads: None,
- num_hash_threads: None,
+ memlock_budget_size: MEMLOCK_BUDGET_SIZE_FOR_TESTS,
};
struct LoadAccountsIndexForShrink<'a, T: ShrinkCollectRefs<'a>> {
@@ -390,7 +394,6 @@ pub struct GetUniqueAccountsResult {
pub struct AccountsAddRootTiming {
pub index_us: u64,
pub cache_us: u64,
- pub store_us: u64,
}
/// Slots older the "number of slots in an epoch minus this number"
@@ -444,13 +447,15 @@ pub struct AccountsDbConfig {
pub partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig,
pub storage_access: StorageAccess,
pub scan_filter_for_shrinking: ScanFilter,
- pub mark_obsolete_accounts: bool,
- /// Number of threads for background cleaning operations (`thread_pool_clean')
- pub num_clean_threads: Option,
- /// Number of threads for foreground operations (`thread_pool`)
+ pub mark_obsolete_accounts: MarkObsoleteAccounts,
+ /// Number of threads for background operations (`thread_pool_background`)
+ pub num_background_threads: Option,
+ /// Number of threads for foreground operations (`thread_pool_foreground`)
pub num_foreground_threads: Option,
- /// Number of threads for background accounts hashing (`thread_pool_hash`)
- pub num_hash_threads: Option,
+ /// Amount of memory (in bytes) that is allowed to be locked during db operations.
+ /// On Linux it's verified on start-up against the kernel limits, such that during runtime
+ /// parts of it can be utilized without panicking.
+ pub memlock_budget_size: usize,
}
#[cfg(not(test))]
@@ -486,12 +491,12 @@ pub enum ScanStorageResult {
Stored(B),
}
-#[derive(Debug, Default)]
+#[derive(Debug)]
pub struct IndexGenerationInfo {
pub accounts_data_len: u64,
- /// The lt hash of the old/duplicate accounts identified during index generation.
- /// Will be used when verifying the accounts lt hash, after rebuilding a Bank.
- pub duplicates_lt_hash: Option>,
+ /// The accounts lt hash calculated during index generation.
+ /// Will be used when verifying accounts, after rebuilding a Bank.
+ pub calculated_accounts_lt_hash: AccountsLtHash,
}
#[derive(Debug, Default)]
@@ -507,6 +512,8 @@ struct SlotIndexGenerationInfo {
num_existed_in_mem: u64,
/// Number of accounts in this slot that already existed, and were on-disk
num_existed_on_disk: u64,
+ /// The accounts lt hash *of only this slot*
+ slot_lt_hash: SlotLtHash,
}
/// The lt hash of old/duplicate accounts
@@ -524,6 +531,16 @@ impl Default for DuplicatesLtHash {
}
}
+/// The lt hash of accounts in a single slot
+#[derive(Debug)]
+struct SlotLtHash(pub LtHash);
+
+impl Default for SlotLtHash {
+ fn default() -> Self {
+ Self(LtHash::identity())
+ }
+}
+
#[derive(Default, Debug)]
struct GenerateIndexTimings {
pub total_time_us: u64,
@@ -539,7 +556,6 @@ struct GenerateIndexTimings {
pub num_duplicate_accounts: u64,
pub populate_duplicate_keys_us: u64,
pub total_slots: u64,
- pub par_duplicates_lt_hash_us: AtomicU64,
pub visit_zero_lamports_us: u64,
pub num_zero_lamport_single_refs: u64,
pub all_accounts_are_zero_lamports_slots: u64,
@@ -605,11 +621,6 @@ impl GenerateIndexTimings {
startup_stats.copy_data_us.swap(0, Ordering::Relaxed),
i64
),
- (
- "par_duplicates_lt_hash_us",
- self.par_duplicates_lt_hash_us.load(Ordering::Relaxed),
- i64
- ),
(
"num_zero_lamport_single_refs",
self.num_zero_lamport_single_refs,
@@ -640,9 +651,6 @@ impl GenerateIndexTimings {
}
}
-impl IndexValue for AccountInfo {}
-impl DiskIndexValue for AccountInfo {}
-
impl IsZeroLamport for AccountSharedData {
fn is_zero_lamport(&self) -> bool {
self.lamports() == 0
@@ -655,56 +663,6 @@ impl IsZeroLamport for Account {
}
}
-struct MultiThreadProgress<'a> {
- last_update: Instant,
- my_last_report_count: u64,
- total_count: &'a AtomicU64,
- report_delay_secs: u64,
- first_caller: bool,
- ultimate_count: u64,
- start_time: Instant,
-}
-
-impl<'a> MultiThreadProgress<'a> {
- fn new(total_count: &'a AtomicU64, report_delay_secs: u64, ultimate_count: u64) -> Self {
- Self {
- last_update: Instant::now(),
- my_last_report_count: 0,
- total_count,
- report_delay_secs,
- first_caller: false,
- ultimate_count,
- start_time: Instant::now(),
- }
- }
- fn report(&mut self, my_current_count: u64) {
- let now = Instant::now();
- if now.duration_since(self.last_update).as_secs() >= self.report_delay_secs {
- let my_total_newly_processed_slots_since_last_report =
- my_current_count - self.my_last_report_count;
-
- self.my_last_report_count = my_current_count;
- let previous_total_processed_slots_across_all_threads = self.total_count.fetch_add(
- my_total_newly_processed_slots_since_last_report,
- Ordering::Relaxed,
- );
- self.first_caller =
- self.first_caller || 0 == previous_total_processed_slots_across_all_threads;
- if self.first_caller {
- let total = previous_total_processed_slots_across_all_threads
- + my_total_newly_processed_slots_since_last_report;
- info!(
- "generating index: {}/{} slots... ({}/s)",
- total,
- self.ultimate_count,
- total / self.start_time.elapsed().as_secs().max(1),
- );
- }
- self.last_update = now;
- }
- }
-}
-
/// An offset into the AccountsDb::storage vector
pub type AtomicAccountsFileId = AtomicU32;
pub type AccountsFileId = u32;
@@ -830,36 +788,6 @@ impl LoadedAccountAccessor<'_> {
}
}
}
-
- fn account_matches_owners(&self, owners: &[Pubkey]) -> Result {
- match self {
- LoadedAccountAccessor::Cached(cached_account) => cached_account
- .as_ref()
- .and_then(|cached_account| {
- if cached_account.account.is_zero_lamport() {
- None
- } else {
- owners
- .iter()
- .position(|entry| cached_account.account.owner() == entry)
- }
- })
- .ok_or(MatchAccountOwnerError::NoMatch),
- LoadedAccountAccessor::Stored(maybe_storage_entry) => {
- // storage entry may not be present if slot was cleaned up in
- // between reading the accounts index and calling this function to
- // get account meta from the storage entry here
- maybe_storage_entry
- .as_ref()
- .map(|(storage_entry, offset)| {
- storage_entry
- .accounts
- .account_matches_owners(*offset, owners)
- })
- .unwrap_or(Err(MatchAccountOwnerError::UnableToLoad))
- }
- }
- }
}
pub enum LoadedAccount<'a> {
@@ -956,12 +884,8 @@ pub struct AccountStorageEntry {
/// storage holding the accounts
pub accounts: AccountsFile,
- /// Keeps track of the number of accounts stored in a specific AppendVec.
- /// This is periodically checked to reuse the stores that do not have
- /// any accounts in it
- /// status corresponding to the storage, lets us know that
- /// the append_vec, once maxed out, then emptied, can be reclaimed
- count_and_status: SeqLock<(usize, AccountStorageStatus)>,
+ /// The number of alive accounts in this storage
+ count: AtomicUsize,
alive_bytes: AtomicUsize,
@@ -995,16 +919,17 @@ impl AccountStorageEntry {
id: AccountsFileId,
file_size: u64,
provider: AccountsFileProvider,
+ storage_access: StorageAccess,
) -> Self {
let tail = AccountsFile::file_name(slot, id);
let path = Path::new(path).join(tail);
- let accounts = provider.new_writable(path, file_size);
+ let accounts = provider.new_writable(path, file_size, storage_access);
Self {
id,
slot,
accounts,
- count_and_status: SeqLock::new((0, AccountStorageStatus::Available)),
+ count: AtomicUsize::new(0),
alive_bytes: AtomicUsize::new(0),
zero_lamport_single_ref_offsets: RwLock::default(),
obsolete_accounts: RwLock::default(),
@@ -1019,14 +944,15 @@ impl AccountStorageEntry {
return None;
}
- let count_and_status = self.count_and_status.lock_write();
self.accounts.reopen_as_readonly().map(|accounts| Self {
id: self.id,
slot: self.slot,
- count_and_status: SeqLock::new(*count_and_status),
+ count: AtomicUsize::new(self.count()),
alive_bytes: AtomicUsize::new(self.alive_bytes()),
accounts,
- zero_lamport_single_ref_offsets: RwLock::default(),
+ zero_lamport_single_ref_offsets: RwLock::new(
+ self.zero_lamport_single_ref_offsets.read().unwrap().clone(),
+ ),
obsolete_accounts: RwLock::new(self.obsolete_accounts.read().unwrap().clone()),
})
}
@@ -1036,40 +962,16 @@ impl AccountStorageEntry {
id,
slot,
accounts,
- count_and_status: SeqLock::new((0, AccountStorageStatus::Available)),
+ count: AtomicUsize::new(0),
alive_bytes: AtomicUsize::new(0),
zero_lamport_single_ref_offsets: RwLock::default(),
obsolete_accounts: RwLock::default(),
}
}
- pub fn set_status(&self, mut status: AccountStorageStatus) {
- let mut count_and_status = self.count_and_status.lock_write();
-
- let count = count_and_status.0;
-
- if status == AccountStorageStatus::Full && count == 0 {
- // this case arises when the append_vec is full (store_ptrs fails),
- // but all accounts have already been removed from the storage
- //
- // the only time it's safe to call reset() on an append_vec is when
- // every account has been removed
- // **and**
- // the append_vec has previously been completely full
- //
- self.accounts.reset();
- status = AccountStorageStatus::Available;
- }
-
- *count_and_status = (count, status);
- }
-
- pub fn status(&self) -> AccountStorageStatus {
- self.count_and_status.read().1
- }
-
+ /// Returns the number of alive accounts in this storage
pub fn count(&self) -> usize {
- self.count_and_status.read().0
+ self.count.load(Ordering::Acquire)
}
pub fn alive_bytes(&self) -> usize {
@@ -1128,6 +1030,20 @@ impl AccountStorageEntry {
zero_lamport_single_ref_offsets.insert(offset)
}
+ /// Insert offsets into the zero lamport single ref account offset set.
+ /// Return the number of new offsets that were inserted.
+ fn batch_insert_zero_lamport_single_ref_account_offsets(&self, offsets: &[Offset]) -> u64 {
+ let mut zero_lamport_single_ref_offsets =
+ self.zero_lamport_single_ref_offsets.write().unwrap();
+ let mut count = 0;
+ for offset in offsets {
+ if zero_lamport_single_ref_offsets.insert(*offset) {
+ count += 1;
+ }
+ }
+ count
+ }
+
/// Return the number of zero_lamport_single_ref accounts in the storage.
fn num_zero_lamport_single_ref_accounts(&self) -> usize {
self.zero_lamport_single_ref_offsets.read().unwrap().len()
@@ -1141,10 +1057,12 @@ impl AccountStorageEntry {
self.alive_bytes().saturating_sub(zero_lamport_dead_bytes)
}
+ /// Returns the number of bytes used in this storage
pub fn written_bytes(&self) -> u64 {
self.accounts.len() as u64
}
+ /// Returns the number of bytes, not accounts, this storage can hold
pub fn capacity(&self) -> u64 {
self.accounts.capacity()
}
@@ -1166,45 +1084,28 @@ impl AccountStorageEntry {
}
fn add_accounts(&self, num_accounts: usize, num_bytes: usize) {
- let mut count_and_status = self.count_and_status.lock_write();
- *count_and_status = (count_and_status.0 + num_accounts, count_and_status.1);
+ self.count.fetch_add(num_accounts, Ordering::Release);
self.alive_bytes.fetch_add(num_bytes, Ordering::Release);
}
- /// returns # of accounts remaining in the storage
+ /// Removes `num_bytes` and `num_accounts` from the storage,
+ /// and returns the remaining number of accounts.
fn remove_accounts(&self, num_bytes: usize, num_accounts: usize) -> usize {
- let mut count_and_status = self.count_and_status.lock_write();
- let (mut count, mut status) = *count_and_status;
-
- if count == num_accounts && status == AccountStorageStatus::Full {
- // this case arises when we remove the last account from the
- // storage, but we've learned from previous write attempts that
- // the storage is full
- //
- // the only time it's safe to call reset() on an append_vec is when
- // every account has been removed
- // **and**
- // the append_vec has previously been completely full
- //
- // otherwise, the storage may be in flight with a store()
- // call
- self.accounts.reset();
- status = AccountStorageStatus::Available;
- }
+ let prev_alive_bytes = self.alive_bytes.fetch_sub(num_bytes, Ordering::Release);
+ let prev_count = self.count.fetch_sub(num_accounts, Ordering::Release);
- // Some code path is removing accounts too many; this may result in an
- // unintended reveal of old state for unrelated accounts.
+ // enforce invariant that we're not removing too many bytes or accounts
assert!(
- count >= num_accounts,
- "double remove of account in slot: {}/store: {}!!",
- self.slot(),
- self.id(),
+ num_bytes <= prev_alive_bytes && num_accounts <= prev_count,
+ "Too many bytes or accounts removed from storage! slot: {}, id: {}, initial alive \
+ bytes: {prev_alive_bytes}, initial num accounts: {prev_count}, num bytes removed: \
+ {num_bytes}, num accounts removed: {num_accounts}",
+ self.slot,
+ self.id,
);
- self.alive_bytes.fetch_sub(num_bytes, Ordering::Release);
- count = count.saturating_sub(num_accounts);
- *count_and_status = (count, status);
- count
+ // SAFETY: subtraction is safe since we just asserted num_accounts <= prev_count
+ prev_count - num_accounts
}
/// Returns the path to the underlying accounts storage file
@@ -1231,13 +1132,23 @@ pub fn get_temp_accounts_paths(count: u32) -> io::Result<(Vec, Vec,
- ref_count: u64,
+ ref_count: RefCount,
/// Indicates if this account might have a zero lamport index entry.
/// If false, the account *shall* not have zero lamport index entries.
/// If true, the account *might* have zero lamport index entries.
might_contain_zero_lamport_entry: bool,
}
+/// Indicates when to mark accounts obsolete
+/// * Disabled - do not mark accounts obsolete
+/// * Enabled - mark accounts obsolete during write cache flush
+#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
+pub enum MarkObsoleteAccounts {
+ #[default]
+ Disabled,
+ Enabled,
+}
+
/// This is the return type of AccountsDb::construct_candidate_clean_keys.
/// It's a collection of pubkeys with associated information to
/// facilitate the decision making about which accounts can be removed
@@ -1305,12 +1216,10 @@ pub struct AccountsDb {
/// Starting file size of appendvecs
file_size: u64,
- /// Foreground thread pool used for par_iter
- pub thread_pool: ThreadPool,
- /// Thread pool for AccountsBackgroundServices
- pub thread_pool_clean: ThreadPool,
- // number of threads to use for accounts hash verify at startup
- pub num_hash_threads: Option,
+ /// Thread pool for foreground tasks, e.g. transaction processing
+ pub thread_pool_foreground: ThreadPool,
+ /// Thread pool for background tasks, e.g. AccountsBackgroundService and flush/clean/shrink
+ pub thread_pool_background: ThreadPool,
pub stats: AccountsStats,
@@ -1360,8 +1269,6 @@ pub struct AccountsDb {
pub(crate) active_stats: ActiveStats,
- pub verify_accounts_hash_in_bg: VerifyAccountsHashInBackground,
-
/// Used to disable logging dead slots during removal.
/// allow disabling noisy log
pub log_dead_slots: AtomicBool,
@@ -1396,29 +1303,13 @@ pub struct AccountsDb {
/// Flag to indicate if the experimental obsolete account tracking feature is enabled.
/// This feature tracks obsolete accounts in the account storage entry allowing
/// for earlier cleaning of obsolete accounts in the storages and index.
- pub mark_obsolete_accounts: bool,
+ pub mark_obsolete_accounts: MarkObsoleteAccounts,
}
pub fn quarter_thread_count() -> usize {
std::cmp::max(2, num_cpus::get() / 4)
}
-pub fn make_min_priority_thread_pool() -> ThreadPool {
- // Use lower thread count to reduce priority.
- let num_threads = quarter_thread_count();
- rayon::ThreadPoolBuilder::new()
- .thread_name(|i| format!("solAccountsLo{i:02}"))
- .num_threads(num_threads)
- .build()
- .unwrap()
-}
-
-/// Returns the default number of threads to use for background accounts hashing
-pub fn default_num_hash_threads() -> NonZeroUsize {
- // 1/8 of the number of cpus and up to 6 threads gives good balance for the system.
- let num_threads = (num_cpus::get() / 8).clamp(2, 6);
- NonZeroUsize::new(num_threads).unwrap()
-}
pub fn default_num_foreground_threads() -> usize {
get_thread_count()
}
@@ -1431,7 +1322,7 @@ impl solana_frozen_abi::abi_example::AbiExample for AccountsDb {
let some_data_len = 5;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
- accounts_db.store_for_tests(some_slot, &[(&key, &account)]);
+ accounts_db.store_for_tests((some_slot, [(&key, &account)].as_slice()));
accounts_db.add_root_and_flush_write_cache(0);
accounts_db
}
@@ -1440,52 +1331,25 @@ impl solana_frozen_abi::abi_example::AbiExample for AccountsDb {
impl AccountsDb {
// The default high and low watermark sizes for the accounts read cache.
// If the cache size exceeds MAX_SIZE_HI, it'll evict entries until the size is <= MAX_SIZE_LO.
+ //
+ // These default values were chosen empirically to minimize evictions on mainnet-beta.
+ // As of 2025-08-15 on mainnet-beta, the read cache size's steady state is around 2.5 GB,
+ // so we add a bit more to buffer future growth.
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
- const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 400 * 1024 * 1024;
+ const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 3_000_000_000;
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
- const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 410 * 1024 * 1024;
+ const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 3_100_000_000;
// See AccountsDbConfig::read_cache_evict_sample_size.
#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
const DEFAULT_READ_ONLY_CACHE_EVICT_SAMPLE_SIZE: usize = 8;
- pub fn default_for_tests() -> Self {
- Self::new_single_for_tests()
- }
-
- pub fn new_single_for_tests() -> Self {
- AccountsDb::new_for_tests(Vec::new())
- }
-
- pub fn new_single_for_tests_with_provider(file_provider: AccountsFileProvider) -> Self {
- AccountsDb::new_for_tests_with_provider(Vec::new(), file_provider)
- }
-
- pub fn new_for_tests(paths: Vec) -> Self {
- Self::new_for_tests_with_provider(paths, AccountsFileProvider::default())
- }
-
- fn new_for_tests_with_provider(
- paths: Vec,
- accounts_file_provider: AccountsFileProvider,
- ) -> Self {
- let mut db = AccountsDb::new_with_config(
- paths,
- Some(ACCOUNTS_DB_CONFIG_FOR_TESTING),
- None,
- Arc::default(),
- );
- db.accounts_file_provider = accounts_file_provider;
- db
- }
-
pub fn new_with_config(
paths: Vec,
- accounts_db_config: Option,
+ accounts_db_config: AccountsDbConfig,
accounts_update_notifier: Option,
exit: Arc,
) -> Self {
- let accounts_db_config = accounts_db_config.unwrap_or_default();
let accounts_index_config = accounts_db_config.index.unwrap_or_default();
let accounts_index = AccountsIndex::new(&accounts_index_config, exit);
@@ -1528,20 +1392,20 @@ impl AccountsDb {
.num_foreground_threads
.map(Into::into)
.unwrap_or_else(default_num_foreground_threads);
- let thread_pool = rayon::ThreadPoolBuilder::new()
+ let thread_pool_foreground = rayon::ThreadPoolBuilder::new()
.num_threads(num_foreground_threads)
- .thread_name(|i| format!("solAccounts{i:02}"))
+ .thread_name(|i| format!("solAcctsDbFg{i:02}"))
.stack_size(ACCOUNTS_STACK_SIZE)
.build()
.expect("new rayon threadpool");
- let num_clean_threads = accounts_db_config
- .num_clean_threads
+ let num_background_threads = accounts_db_config
+ .num_background_threads
.map(Into::into)
.unwrap_or_else(quarter_thread_count);
- let thread_pool_clean = rayon::ThreadPoolBuilder::new()
- .thread_name(|i| format!("solAccountsLo{i:02}"))
- .num_threads(num_clean_threads)
+ let thread_pool_background = rayon::ThreadPoolBuilder::new()
+ .thread_name(|i| format!("solAcctsDbBg{i:02}"))
+ .num_threads(num_background_threads)
.build()
.expect("new rayon threadpool");
@@ -1575,10 +1439,8 @@ impl AccountsDb {
exhaustively_verify_refcounts: accounts_db_config.exhaustively_verify_refcounts,
storage_access: accounts_db_config.storage_access,
scan_filter_for_shrinking: accounts_db_config.scan_filter_for_shrinking,
- thread_pool,
- thread_pool_clean,
- num_hash_threads: accounts_db_config.num_hash_threads,
- verify_accounts_hash_in_bg: VerifyAccountsHashInBackground::default(),
+ thread_pool_foreground,
+ thread_pool_background,
active_stats: ActiveStats::default(),
storage: AccountStorage::default(),
accounts_cache: AccountsCache::default(),
@@ -1645,6 +1507,7 @@ impl AccountsDb {
self.next_id(),
size,
self.accounts_file_provider,
+ self.storage_access,
)
}
@@ -1658,10 +1521,10 @@ impl AccountsDb {
ancient_account_cleans: &AtomicU64,
epoch_schedule: &EpochSchedule,
pubkeys_removed_from_accounts_index: &Mutex,
- ) -> SlotList {
+ ) -> ReclaimsSlotList {
let one_epoch_old = self.get_oldest_non_ancient_slot(epoch_schedule);
let mut clean_rooted = Measure::start("clean_old_root-ms");
- let mut reclaims = Vec::new();
+ let mut reclaims = ReclaimsSlotList::new();
let removed_from_index = self.accounts_index.clean_rooted_entries(
pubkey,
&mut reclaims,
@@ -1696,20 +1559,19 @@ impl AccountsDb {
reclaims: &SlotList,
pubkeys_removed_from_accounts_index: &HashSet,
) -> ReclaimResult {
- let mut measure = Measure::start("clean_old_root_reclaims");
-
- let reclaim_result = self.handle_reclaims(
- (!reclaims.is_empty()).then(|| reclaims.iter()),
+ if reclaims.is_empty() {
+ return ReclaimResult::default();
+ }
+ let (reclaim_result, reclaim_us) = measure_us!(self.handle_reclaims(
+ reclaims.iter(),
None,
pubkeys_removed_from_accounts_index,
HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats),
MarkAccountsObsolete::No,
- );
- measure.stop();
- debug!("{measure}");
+ ));
self.clean_accounts_stats
.clean_old_root_reclaim_us
- .fetch_add(measure.as_us(), Ordering::Relaxed);
+ .fetch_add(reclaim_us, Ordering::Relaxed);
reclaim_result
}
@@ -1818,26 +1680,30 @@ impl AccountsDb {
}
#[must_use]
- pub fn purge_keys_exact<'a, C>(
- &'a self,
- pubkey_to_slot_set: impl Iterator
- ,
- ) -> (Vec<(Slot, AccountInfo)>, PubkeysRemovedFromAccountsIndex)
+ pub fn purge_keys_exact(
+ &self,
+ pubkey_to_slot_set: impl IntoIterator
- ,
+ ) -> (
+ ReclaimsSlotList,
+ PubkeysRemovedFromAccountsIndex,
+ )
where
- C: Contains<'a, Slot> + 'a,
+ C: for<'a> Contains<'a, Slot>,
{
- let mut reclaims = Vec::new();
+ let mut reclaims = ReclaimsSlotList::new();
let mut dead_keys = Vec::new();
let mut purge_exact_count = 0;
- let (_, purge_exact_us) = measure_us!(for (pubkey, slots_set) in pubkey_to_slot_set {
- purge_exact_count += 1;
- let is_empty = self
- .accounts_index
- .purge_exact(pubkey, slots_set, &mut reclaims);
- if is_empty {
- dead_keys.push(pubkey);
- }
- });
+ let (_, purge_exact_us) =
+ measure_us!(for (pubkey, slots_set) in pubkey_to_slot_set.into_iter() {
+ purge_exact_count += 1;
+ let is_empty = self
+ .accounts_index
+ .purge_exact(&pubkey, slots_set, &mut reclaims);
+ if is_empty {
+ dead_keys.push(pubkey);
+ }
+ });
let (pubkeys_removed_from_accounts_index, handle_dead_keys_us) = measure_us!(self
.accounts_index
@@ -2050,7 +1916,7 @@ impl AccountsDb {
// Free to consume all the cores during startup
dirty_store_routine();
} else {
- self.thread_pool_clean.install(|| {
+ self.thread_pool_background.install(|| {
dirty_store_routine();
});
}
@@ -2098,11 +1964,6 @@ impl AccountsDb {
(candidates, min_dirty_slot)
}
- /// Call clean_accounts() with the common parameters that tests/benches use.
- pub fn clean_accounts_for_tests(&self) {
- self.clean_accounts(None, false, &EpochSchedule::default())
- }
-
/// called with cli argument to verify refcounts are correct on all accounts
/// this is very slow
/// this function will call Rayon par_iter, so you will want to have thread pool installed if
@@ -2112,16 +1973,16 @@ impl AccountsDb {
max_slot_inclusive.unwrap_or_else(|| self.accounts_index.max_root_inclusive());
info!("exhaustively verifying refcounts as of slot: {max_slot_inclusive}");
let pubkey_refcount = DashMap::>::default();
- let slots = self.storage.all_slots();
+ let mut storages = self.storage.all_storages();
+ storages.retain(|s| s.slot() <= max_slot_inclusive);
// populate
- slots.into_par_iter().for_each(|slot| {
- if slot > max_slot_inclusive {
- return;
- }
- if let Some(storage) = self.storage.get_slot_storage_entry(slot) {
+ storages.par_iter().for_each_init(
+ || Box::new(append_vec::new_scan_accounts_reader()),
+ |reader, storage| {
+ let slot = storage.slot();
storage
.accounts
- .scan_accounts(|_offset, account| {
+ .scan_accounts(reader.as_mut(), |_offset, account| {
let pk = account.pubkey();
match pubkey_refcount.entry(*pk) {
dashmap::mapref::entry::Entry::Occupied(mut occupied_entry) => {
@@ -2134,9 +1995,9 @@ impl AccountsDb {
}
}
})
- .expect("must scan accounts storage");
- }
- });
+ .expect("must scan accounts storage")
+ },
+ );
let total = pubkey_refcount.len();
let failed = AtomicBool::default();
let threads = quarter_thread_count();
@@ -2218,8 +2079,8 @@ impl AccountsDb {
if is_startup {
self.exhaustively_verify_refcounts(max_clean_root_inclusive);
} else {
- // otherwise, use the cleaning thread pool
- self.thread_pool_clean
+ // otherwise, use the background thread pool
+ self.thread_pool_background
.install(|| self.exhaustively_verify_refcounts(max_clean_root_inclusive));
}
}
@@ -2253,7 +2114,7 @@ impl AccountsDb {
let not_found_on_fork_accum = AtomicU64::new(0);
let missing_accum = AtomicU64::new(0);
let useful_accum = AtomicU64::new(0);
- let reclaims: SlotList = Vec::with_capacity(num_candidates as usize);
+ let reclaims: SlotList = SlotList::with_capacity(num_candidates as usize);
let reclaims = Mutex::new(reclaims);
let pubkeys_removed_from_accounts_index: PubkeysRemovedFromAccountsIndex = HashSet::new();
let pubkeys_removed_from_accounts_index = Mutex::new(pubkeys_removed_from_accounts_index);
@@ -2371,7 +2232,7 @@ impl AccountsDb {
if is_startup {
do_clean_scan();
} else {
- self.thread_pool_clean.install(do_clean_scan);
+ self.thread_pool_background.install(do_clean_scan);
}
accounts_scan.stop();
drop(active_guard);
@@ -2486,13 +2347,13 @@ impl AccountsDb {
let mut reclaims_time = Measure::start("reclaims");
// Recalculate reclaims with new purge set
let mut pubkey_to_slot_set = Vec::new();
- for candidates_bin in candidates.iter() {
+ for candidates_bin in candidates {
let mut bin_set = candidates_bin
- .iter()
+ .into_iter()
.filter_map(|(pubkey, cleaning_info)| {
- let slot_list = &cleaning_info.slot_list;
+ let slot_list = cleaning_info.slot_list;
(!slot_list.is_empty()).then_some((
- *pubkey,
+ pubkey,
slot_list
.iter()
.map(|(slot, _)| *slot)
@@ -2504,16 +2365,18 @@ impl AccountsDb {
}
let (reclaims, pubkeys_removed_from_accounts_index2) =
- self.purge_keys_exact(pubkey_to_slot_set.iter());
+ self.purge_keys_exact(pubkey_to_slot_set);
pubkeys_removed_from_accounts_index.extend(pubkeys_removed_from_accounts_index2);
- self.handle_reclaims(
- (!reclaims.is_empty()).then(|| reclaims.iter()),
- None,
- &pubkeys_removed_from_accounts_index,
- HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats),
- MarkAccountsObsolete::No,
- );
+ if !reclaims.is_empty() {
+ self.handle_reclaims(
+ reclaims.iter(),
+ None,
+ &pubkeys_removed_from_accounts_index,
+ HandleReclaims::ProcessDeadSlots(&self.clean_accounts_stats.purge_stats),
+ MarkAccountsObsolete::No,
+ );
+ }
reclaims_time.stop();
drop(active_guard);
@@ -2694,7 +2557,7 @@ impl AccountsDb {
/// It must be unrefed and removed to avoid double counting or missed counting in shrink
fn handle_reclaims<'a, I>(
&'a self,
- reclaims: Option,
+ reclaims: I,
expected_single_dead_slot: Option,
pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex,
handle_reclaims: HandleReclaims<'a>,
@@ -2704,32 +2567,27 @@ impl AccountsDb {
I: Iterator
- ,
{
let mut reclaim_result = ReclaimResult::default();
- if let Some(reclaims) = reclaims {
- let (dead_slots, reclaimed_offsets) = self.remove_dead_accounts(
- reclaims,
- expected_single_dead_slot,
- mark_accounts_obsolete,
- );
- reclaim_result.1 = reclaimed_offsets;
-
- if let HandleReclaims::ProcessDeadSlots(purge_stats) = handle_reclaims {
- if let Some(expected_single_dead_slot) = expected_single_dead_slot {
- assert!(dead_slots.len() <= 1);
- if dead_slots.len() == 1 {
- assert!(dead_slots.contains(&expected_single_dead_slot));
- }
- }
-
- self.process_dead_slots(
- &dead_slots,
- Some(&mut reclaim_result.0),
- purge_stats,
- pubkeys_removed_from_accounts_index,
- );
- } else {
- assert!(dead_slots.is_empty());
+ let (dead_slots, reclaimed_offsets) =
+ self.remove_dead_accounts(reclaims, expected_single_dead_slot, mark_accounts_obsolete);
+ reclaim_result.1 = reclaimed_offsets;
+ let HandleReclaims::ProcessDeadSlots(purge_stats) = handle_reclaims;
+ if let Some(expected_single_dead_slot) = expected_single_dead_slot {
+ assert!(dead_slots.len() <= 1);
+ if dead_slots.len() == 1 {
+ assert!(dead_slots.contains(&expected_single_dead_slot));
}
}
+ // if we are marking accounts obsolete, then any dead slots have already been cleaned
+ let clean_stored_dead_slots =
+ !matches!(mark_accounts_obsolete, MarkAccountsObsolete::Yes(_));
+
+ self.process_dead_slots(
+ &dead_slots,
+ Some(&mut reclaim_result.0),
+ purge_stats,
+ pubkeys_removed_from_accounts_index,
+ clean_stored_dead_slots,
+ );
reclaim_result
}
@@ -2820,22 +2678,34 @@ impl AccountsDb {
// supported pipelines in AccountsDb
/// pubkeys_removed_from_accounts_index - These keys have already been removed from the accounts index
/// and should not be unref'd. If they exist in the accounts index, they are NEW.
+ /// clean_stored_dead_slots - clean_stored_dead_slots iterates through all the pubkeys in the dead
+ /// slots and unrefs them in the accounts index if they are not present in
+ /// pubkeys_removed_from_accounts_index. Skipping clean is the equivalent to
+ /// pubkeys_removed_from_accounts_index containing all the pubkeys in the dead slots
fn process_dead_slots(
&self,
dead_slots: &IntSet,
purged_account_slots: Option<&mut AccountSlots>,
purge_stats: &PurgeStats,
pubkeys_removed_from_accounts_index: &PubkeysRemovedFromAccountsIndex,
+ clean_stored_dead_slots: bool,
) {
if dead_slots.is_empty() {
return;
}
let mut clean_dead_slots = Measure::start("reclaims::clean_dead_slots");
- self.clean_stored_dead_slots(
- dead_slots,
- purged_account_slots,
- pubkeys_removed_from_accounts_index,
- );
+
+ if clean_stored_dead_slots {
+ self.clean_stored_dead_slots(
+ dead_slots,
+ purged_account_slots,
+ pubkeys_removed_from_accounts_index,
+ );
+ }
+
+ // Remove dead slots from the accounts index root tracker
+ self.remove_dead_slots_metadata(dead_slots.iter());
+
clean_dead_slots.stop();
let mut purge_removed_slots = Measure::start("reclaims::purge_removed_slots");
@@ -3059,11 +2929,6 @@ impl AccountsDb {
let mut index_read_elapsed = Measure::start("index_read_elapsed");
- let len = stored_accounts.len();
- let alive_accounts_collect = Mutex::new(T::with_capacity(len, slot));
- let pubkeys_to_unref_collect = Mutex::new(Vec::with_capacity(len));
- let zero_lamport_single_ref_pubkeys_collect = Mutex::new(Vec::with_capacity(len));
-
// Get a set of all obsolete offsets
// Slot is not needed, as all obsolete accounts can be considered
// dead for shrink. Zero lamport accounts are not marked obsolete
@@ -3074,18 +2939,31 @@ impl AccountsDb {
.collect();
// Filter all the accounts that are marked obsolete
- let initial_len = stored_accounts.len();
+ let total_starting_accounts = stored_accounts.len();
stored_accounts.retain(|account| !obsolete_offsets.contains(&account.index_info.offset()));
- let obsolete_accounts_filtered = initial_len - stored_accounts.len();
+
+ let len = stored_accounts.len();
+ let shrink_collect = Mutex::new(ShrinkCollect {
+ slot,
+ capacity: *capacity,
+ pubkeys_to_unref: Vec::with_capacity(len),
+ zero_lamport_single_ref_pubkeys: Vec::new(),
+ alive_accounts: T::with_capacity(len, slot),
+ total_starting_accounts,
+ all_are_zero_lamports: true,
+ alive_total_bytes: 0, // will be updated after `alive_accounts` is populated
+ });
stats
.accounts_loaded
.fetch_add(len as u64, Ordering::Relaxed);
+ stats
+ .obsolete_accounts_filtered
+ .fetch_add((total_starting_accounts - len) as u64, Ordering::Relaxed);
stats
.num_duplicated_accounts
.fetch_add(*num_duplicated_accounts as u64, Ordering::Relaxed);
- let all_are_zero_lamports_collect = Mutex::new(true);
- self.thread_pool_clean.install(|| {
+ self.thread_pool_background.install(|| {
stored_accounts
.par_chunks(SHRINK_COLLECT_CHUNK_SIZE)
.for_each(|stored_accounts| {
@@ -3097,45 +2975,34 @@ impl AccountsDb {
} = self.load_accounts_index_for_shrink(stored_accounts, stats, slot);
// collect
- alive_accounts_collect
- .lock()
- .unwrap()
- .collect(alive_accounts);
- pubkeys_to_unref_collect
- .lock()
- .unwrap()
+ let mut shrink_collect = shrink_collect.lock().unwrap();
+ shrink_collect.alive_accounts.collect(alive_accounts);
+ shrink_collect
+ .pubkeys_to_unref
.append(&mut pubkeys_to_unref);
- zero_lamport_single_ref_pubkeys_collect
- .lock()
- .unwrap()
+ shrink_collect
+ .zero_lamport_single_ref_pubkeys
.append(&mut zero_lamport_single_ref_pubkeys);
if !all_are_zero_lamports {
- *all_are_zero_lamports_collect.lock().unwrap() = false;
+ shrink_collect.all_are_zero_lamports = false;
}
});
});
- let alive_accounts = alive_accounts_collect.into_inner().unwrap();
- let pubkeys_to_unref = pubkeys_to_unref_collect.into_inner().unwrap();
- let zero_lamport_single_ref_pubkeys = zero_lamport_single_ref_pubkeys_collect
- .into_inner()
- .unwrap();
-
index_read_elapsed.stop();
- stats
- .obsolete_accounts_filtered
- .fetch_add(obsolete_accounts_filtered as u64, Ordering::Relaxed);
+ let mut shrink_collect = shrink_collect.into_inner().unwrap();
+ let alive_total_bytes = shrink_collect.alive_accounts.alive_bytes();
+ shrink_collect.alive_total_bytes = alive_total_bytes;
stats
.index_read_elapsed
.fetch_add(index_read_elapsed.as_us(), Ordering::Relaxed);
- let alive_total_bytes = alive_accounts.alive_bytes();
-
- stats
- .accounts_removed
- .fetch_add(len - alive_accounts.len(), Ordering::Relaxed);
+ stats.accounts_removed.fetch_add(
+ total_starting_accounts - shrink_collect.alive_accounts.len(),
+ Ordering::Relaxed,
+ );
stats.bytes_removed.fetch_add(
capacity.saturating_sub(alive_total_bytes as u64),
Ordering::Relaxed,
@@ -3144,16 +3011,7 @@ impl AccountsDb {
.bytes_written
.fetch_add(alive_total_bytes as u64, Ordering::Relaxed);
- ShrinkCollect {
- slot,
- capacity: *capacity,
- pubkeys_to_unref,
- zero_lamport_single_ref_pubkeys,
- alive_accounts,
- alive_total_bytes,
- total_starting_accounts: len,
- all_are_zero_lamports: all_are_zero_lamports_collect.into_inner().unwrap(),
- }
+ shrink_collect
}
/// These accounts were found during shrink of `slot` to be slot_list=[slot] and ref_count == 1 and lamports = 0.
@@ -3194,7 +3052,7 @@ impl AccountsDb {
);
zero_lamport_single_ref_pubkeys.iter().for_each(|k| {
- _ = self.purge_keys_exact([&(**k, slot)].into_iter());
+ _ = self.purge_keys_exact([(**k, slot)]);
});
}
@@ -3799,7 +3657,7 @@ impl AccountsDb {
let num_selected = shrink_slots.len();
let (_, shrink_all_us) = measure_us!({
- self.thread_pool_clean.install(|| {
+ self.thread_pool_background.install(|| {
shrink_slots
.into_par_iter()
.for_each(|(slot, slot_shrink_candidate)| {
@@ -3922,29 +3780,6 @@ impl AccountsDb {
Ok(())
}
- #[cfg(feature = "dev-context-only-utils")]
- pub fn unchecked_scan_accounts(
- &self,
- metric_name: &'static str,
- ancestors: &Ancestors,
- mut scan_func: F,
- config: &ScanConfig,
- ) where
- F: FnMut(&Pubkey, LoadedAccount, Slot),
- {
- self.accounts_index.unchecked_scan_accounts(
- metric_name,
- ancestors,
- |pubkey, (account_info, slot)| {
- self.get_account_accessor(slot, pubkey, &account_info.storage_location())
- .get_loaded_account(|loaded_account| {
- scan_func(pubkey, loaded_account, slot);
- });
- },
- config,
- );
- }
-
pub fn index_scan_accounts(
&self,
ancestors: &Ancestors,
@@ -4010,7 +3845,8 @@ impl AccountsDb {
})
}
ScanAccountStorageData::DataRefForStorage => {
- storage.scan_accounts(|_offset, account| {
+ let mut reader = append_vec::new_scan_accounts_reader();
+ storage.scan_accounts(&mut reader, |_offset, account| {
let account_without_data = StoredAccountInfoWithoutData::new_from(&account);
storage_scan_func(retval, &account_without_data, Some(account.data));
})
@@ -4035,7 +3871,7 @@ impl AccountsDb {
// If we see the slot in the cache, then all the account information
// is in this cached slot
if slot_cache.len() > SCAN_SLOT_PAR_ITER_THRESHOLD {
- ScanStorageResult::Cached(self.thread_pool.install(|| {
+ ScanStorageResult::Cached(self.thread_pool_foreground.install(|| {
slot_cache
.par_iter()
.filter_map(|cached_account| {
@@ -4091,47 +3927,6 @@ impl AccountsDb {
self.do_load(ancestors, pubkey, None, load_hint, LoadZeroLamports::None)
}
- /// Return Ok(index_of_matching_owner) if the account owner at `offset` is one of the pubkeys in `owners`.
- /// Return Err(MatchAccountOwnerError::NoMatch) if the account has 0 lamports or the owner is not one of
- /// the pubkeys in `owners`.
- /// Return Err(MatchAccountOwnerError::UnableToLoad) if the account could not be accessed.
- pub fn account_matches_owners(
- &self,
- ancestors: &Ancestors,
- account: &Pubkey,
- owners: &[Pubkey],
- ) -> Result {
- let (slot, storage_location, _maybe_account_accesor) = self
- .read_index_for_accessor_or_load_slow(ancestors, account, None, false)
- .ok_or(MatchAccountOwnerError::UnableToLoad)?;
-
- if !storage_location.is_cached() {
- let result = self.read_only_accounts_cache.load(*account, slot);
- if let Some(account) = result {
- return if account.is_zero_lamport() {
- Err(MatchAccountOwnerError::NoMatch)
- } else {
- owners
- .iter()
- .position(|entry| account.owner() == entry)
- .ok_or(MatchAccountOwnerError::NoMatch)
- };
- }
- }
-
- let (account_accessor, _slot) = self
- .retry_to_get_account_accessor(
- slot,
- storage_location,
- ancestors,
- account,
- None,
- LoadHint::Unspecified,
- )
- .ok_or(MatchAccountOwnerError::UnableToLoad)?;
- account_accessor.account_matches_owners(owners)
- }
-
/// load the account with `pubkey` into the read only accounts cache.
/// The goal is to make subsequent loads (which caller expects to occur) to find the account quickly.
pub fn load_account_into_read_cache(&self, ancestors: &Ancestors, pubkey: &Pubkey) {
@@ -4476,18 +4271,15 @@ impl AccountsDb {
/// Load account with `pubkey` and maybe put into read cache.
///
- /// If the account is not already cached, invoke `should_put_in_read_cache_fn`.
- /// The caller can inspect the account and indicate if it should be put into the read cache or not.
- ///
/// Return the account and the slot when the account was last stored.
/// Return None for ZeroLamport accounts.
pub fn load_account_with(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
- should_put_in_read_cache_fn: impl Fn(&AccountSharedData) -> bool,
+ should_put_in_read_cache: bool,
) -> Option<(AccountSharedData, Slot)> {
- let (slot, storage_location, _maybe_account_accesor) =
+ let (slot, storage_location, _maybe_account_accessor) =
self.read_index_for_accessor_or_load_slow(ancestors, pubkey, None, false)?;
// Notice the subtle `?` at previous line, we bail out pretty early if missing.
@@ -4519,7 +4311,7 @@ impl AccountsDb {
return None;
}
- if !in_write_cache && should_put_in_read_cache_fn(&account) {
+ if !in_write_cache && should_put_in_read_cache {
/*
We show this store into the read-only cache for account 'A' and future loads of 'A' from the read-only cache are
safe/reflect 'A''s latest state on this fork.
@@ -4554,7 +4346,7 @@ impl AccountsDb {
let starting_max_root = self.accounts_index.max_root_inclusive();
- let (slot, storage_location, _maybe_account_accesor) =
+ let (slot, storage_location, _maybe_account_accessor) =
self.read_index_for_accessor_or_load_slow(ancestors, pubkey, max_root, false)?;
// Notice the subtle `?` at previous line, we bail out pretty early if missing.
@@ -4621,8 +4413,8 @@ impl AccountsDb {
if starting_max_root != ending_max_root {
warn!(
"do_load_with_populate_read_cache() scanning pubkey {pubkey} called with \
- fixed max root, but max root changed from {starting_max_root} to \
- {ending_max_root} during function call"
+ fixed max root, but max root changed from {starting_max_root} to \
+ {ending_max_root} during function call"
);
}
}
@@ -4650,16 +4442,6 @@ impl AccountsDb {
}
}
- fn has_space_available(&self, slot: Slot, size: u64) -> bool {
- let store = self.storage.get_slot_storage_entry(slot).unwrap();
- if store.status() == AccountStorageStatus::Available
- && store.accounts.remaining_bytes() >= size
- {
- return true;
- }
- false
- }
-
fn create_store(
&self,
slot: Slot,
@@ -4860,17 +4642,14 @@ impl AccountsDb {
}
fn purge_slot_cache(&self, purged_slot: Slot, slot_cache: &SlotCache) {
- let pubkey_to_slot_set: Vec<(Pubkey, Slot)> = slot_cache
- .iter()
- .map(|account| (*account.key(), purged_slot))
- .collect();
- self.purge_slot_cache_pubkeys(purged_slot, pubkey_to_slot_set, true);
+ let pubkeys = slot_cache.iter().map(|account| *account.key());
+ self.purge_slot_cache_pubkeys(purged_slot, pubkeys, true);
}
fn purge_slot_cache_pubkeys(
&self,
purged_slot: Slot,
- pubkey_to_slot_set: Vec<(Pubkey, Slot)>,
+ pubkeys: impl IntoIterator
- ,
is_dead: bool,
) {
// Slot purged from cache should not exist in the backing store
@@ -4878,8 +4657,11 @@ impl AccountsDb {
.storage
.get_slot_storage_entry_shrinking_in_progress_ok(purged_slot)
.is_none());
- let num_purged_keys = pubkey_to_slot_set.len();
- let (reclaims, _) = self.purge_keys_exact(pubkey_to_slot_set.iter());
+ let mut num_purged_keys = 0;
+ let (reclaims, _) = self.purge_keys_exact(pubkeys.into_iter().map(|key| {
+ num_purged_keys += 1;
+ (key, purged_slot)
+ }));
assert_eq!(reclaims.len(), num_purged_keys);
if is_dead {
self.remove_dead_slots_metadata(std::iter::once(&purged_slot));
@@ -4913,8 +4695,7 @@ impl AccountsDb {
let mut purge_accounts_index_elapsed = Measure::start("purge_accounts_index_elapsed");
// Purge this slot from the accounts index
- let (reclaims, pubkeys_removed_from_accounts_index) =
- self.purge_keys_exact(stored_keys.iter());
+ let (reclaims, pubkeys_removed_from_accounts_index) = self.purge_keys_exact(stored_keys);
purge_accounts_index_elapsed.stop();
purge_stats
.purge_accounts_index_elapsed
@@ -4926,13 +4707,15 @@ impl AccountsDb {
// Slot should be dead after removing all its account entries
// There is no reason to mark accounts obsolete as the slot storage is being purged
let expected_dead_slot = Some(remove_slot);
- self.handle_reclaims(
- (!reclaims.is_empty()).then(|| reclaims.iter()),
- expected_dead_slot,
- &pubkeys_removed_from_accounts_index,
- HandleReclaims::ProcessDeadSlots(purge_stats),
- MarkAccountsObsolete::No,
- );
+ if !reclaims.is_empty() {
+ self.handle_reclaims(
+ reclaims.iter(),
+ expected_dead_slot,
+ &pubkeys_removed_from_accounts_index,
+ HandleReclaims::ProcessDeadSlots(purge_stats),
+ MarkAccountsObsolete::No,
+ );
+ }
handle_reclaims_elapsed.stop();
purge_stats
.handle_reclaims_elapsed
@@ -5101,78 +4884,13 @@ impl AccountsDb {
hasher
}
- fn write_accounts_to_storage<'a>(
- &self,
- slot: Slot,
- storage: &AccountStorageEntry,
- accounts_and_meta_to_store: &impl StorableAccounts<'a>,
- ) -> Vec {
- let mut infos: Vec = Vec::with_capacity(accounts_and_meta_to_store.len());
- let mut total_append_accounts_us = 0;
- while infos.len() < accounts_and_meta_to_store.len() {
- let mut append_accounts = Measure::start("append_accounts");
- let stored_accounts_info = storage
- .accounts
- .write_accounts(accounts_and_meta_to_store, infos.len());
- append_accounts.stop();
- total_append_accounts_us += append_accounts.as_us();
- let Some(stored_accounts_info) = stored_accounts_info else {
- storage.set_status(AccountStorageStatus::Full);
-
- // See if an account overflows the append vecs in the slot.
- let data_len = accounts_and_meta_to_store.data_len(infos.len());
- let data_len = (data_len + STORE_META_OVERHEAD) as u64;
- if !self.has_space_available(slot, data_len) {
- info!(
- "write_accounts_to_storage, no space: {}, {}, {}, {}, {}",
- storage.accounts.capacity(),
- storage.accounts.remaining_bytes(),
- data_len,
- infos.len(),
- accounts_and_meta_to_store.len()
- );
- let special_store_size = std::cmp::max(data_len * 2, self.file_size);
- self.create_and_insert_store(slot, special_store_size, "large create");
- }
- continue;
- };
-
- let store_id = storage.id();
- for (i, offset) in stored_accounts_info.offsets.iter().enumerate() {
- infos.push(AccountInfo::new(
- StorageLocation::AppendVec(store_id, *offset),
- accounts_and_meta_to_store.is_zero_lamport(i),
- ));
- }
- storage.add_accounts(
- stored_accounts_info.offsets.len(),
- stored_accounts_info.size,
- );
-
- // restore the state to available
- storage.set_status(AccountStorageStatus::Available);
- }
-
- self.stats
- .store_append_accounts
- .fetch_add(total_append_accounts_us, Ordering::Relaxed);
-
- infos
- }
-
- pub fn mark_slot_frozen(&self, slot: Slot) {
- if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) {
- slot_cache.mark_slot_frozen();
- slot_cache.report_slot_store_metrics();
- }
- self.accounts_cache.report_size();
- }
-
- // These functions/fields are only usable from a dev context (i.e. tests and benches)
- #[cfg(feature = "dev-context-only-utils")]
- pub fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) {
- self.flush_slot_cache(slot);
- }
+ pub fn mark_slot_frozen(&self, slot: Slot) {
+ if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) {
+ slot_cache.mark_slot_frozen();
+ slot_cache.report_slot_store_metrics();
+ }
+ self.accounts_cache.report_size();
+ }
/// true if write cache is too big
fn should_aggressively_flush_cache(&self) -> bool {
@@ -5380,7 +5098,7 @@ impl AccountsDb {
) -> FlushStats {
let mut flush_stats = FlushStats::default();
let iter_items: Vec<_> = slot_cache.iter().collect();
- let mut pubkey_to_slot_set: Vec<(Pubkey, Slot)> = vec![];
+ let mut pubkeys: Vec = vec![];
if should_flush_f.is_some() {
if let Some(max_clean_root) = max_clean_root {
if slot > max_clean_root {
@@ -5409,7 +5127,7 @@ impl AccountsDb {
} else {
// If we don't flush, we have to remove the entry from the
// index, since it's equivalent to purging
- pubkey_to_slot_set.push((*key, slot));
+ pubkeys.push(*key);
flush_stats.num_bytes_purged +=
aligned_stored_size(account.data().len()) as u64;
flush_stats.num_accounts_purged += 1;
@@ -5421,7 +5139,7 @@ impl AccountsDb {
let is_dead_slot = accounts.is_empty();
// Remove the account index entries from earlier roots that are outdated by later roots.
// Safe because queries to the index will be reading updates from later roots.
- self.purge_slot_cache_pubkeys(slot, pubkey_to_slot_set, is_dead_slot);
+ self.purge_slot_cache_pubkeys(slot, pubkeys, is_dead_slot);
if !is_dead_slot {
// This ensures that all updates are written to an AppendVec, before any
@@ -5432,10 +5150,25 @@ impl AccountsDb {
flush_stats.num_bytes_flushed.0,
"flush_slot_cache",
);
+
+ // Use ReclaimOldSlots to reclaim old slots if marking obsolete accounts and cleaning
+ // Cleaning is enabled if `should_flush_f` is Some.
+ // should_flush_f is set to None when
+ // 1) There's an ongoing scan to avoid reclaiming accounts being scanned.
+ // 2) The slot is > max_clean_root to prevent unrooted slots from reclaiming rooted versions.
+ let reclaim_method = if self.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled
+ && should_flush_f.is_some()
+ {
+ UpsertReclaim::ReclaimOldSlots
+ } else {
+ UpsertReclaim::IgnoreReclaims
+ };
+
let (store_accounts_timing_inner, store_accounts_total_inner_us) = measure_us!(self
- .store_accounts_frozen(
+ ._store_accounts_frozen(
(slot, &accounts[..]),
&flushed_store,
+ reclaim_method,
UpdateIndexThreadSelection::PoolWithThreshold,
));
flush_stats.store_accounts_timing = store_accounts_timing_inner;
@@ -5458,7 +5191,7 @@ impl AccountsDb {
self.uncleaned_pubkeys
.entry(slot)
.or_default()
- .extend(accounts.iter().map(|(pubkey, _account)| **pubkey));
+ .extend(accounts.into_iter().map(|(pubkey, _account)| *pubkey));
flush_stats
}
@@ -5519,44 +5252,6 @@ impl AccountsDb {
}
}
- fn write_accounts_to_cache<'a, 'b>(
- &self,
- slot: Slot,
- accounts_and_meta_to_store: &impl StorableAccounts<'b>,
- txs: Option<&[&SanitizedTransaction]>,
- ) -> Vec {
- let mut current_write_version = if self.accounts_update_notifier.is_some() {
- self.write_version
- .fetch_add(accounts_and_meta_to_store.len() as u64, Ordering::AcqRel)
- } else {
- 0
- };
-
- (0..accounts_and_meta_to_store.len())
- .map(|index| {
- let txn = txs.map(|txs| *txs.get(index).expect("txs must be present if provided"));
- accounts_and_meta_to_store.account_default_if_zero_lamport(index, |account| {
- let account_shared_data = account.to_account_shared_data();
- let pubkey = account.pubkey();
- let account_info =
- AccountInfo::new(StorageLocation::Cached, account.is_zero_lamport());
-
- self.notify_account_at_accounts_update(
- slot,
- &account_shared_data,
- &txn,
- pubkey,
- current_write_version,
- );
- current_write_version = current_write_version.saturating_add(1);
-
- self.accounts_cache.store(slot, pubkey, account_shared_data);
- account_info
- })
- })
- .collect()
- }
-
fn report_store_stats(&self) {
let mut total_count = 0;
let mut newest_slot = 0;
@@ -5589,28 +5284,6 @@ impl AccountsDb {
("total_alive_bytes", total_alive_bytes, i64),
("total_alive_ratio", total_alive_ratio, f64),
);
- datapoint_info!(
- "accounts_db-perf-stats",
- (
- "delta_hash_num",
- self.stats.delta_hash_num.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "delta_hash_scan_us",
- self.stats
- .delta_hash_scan_time_total_us
- .swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "delta_hash_accumulate_us",
- self.stats
- .delta_hash_accumulate_time_total_us
- .swap(0, Ordering::Relaxed),
- i64
- ),
- );
}
/// Calculates the accounts lt hash
@@ -5676,58 +5349,6 @@ impl AccountsDb {
AccountsLtHash(lt_hash)
}
- /// Calculates the accounts lt hash
- ///
- /// Intended to be used to verify the accounts lt hash at startup.
- ///
- /// The `duplicates_lt_hash` is the old/duplicate accounts to mix *out* of the storages.
- /// This value comes from index generation.
- /// The 'startup_slot' is the slot for which the accounts_lt_hash is calculated.
- pub fn calculate_accounts_lt_hash_at_startup_from_storages(
- &self,
- storages: &[Arc],
- duplicates_lt_hash: &DuplicatesLtHash,
- startup_slot: Slot,
- ) -> AccountsLtHash {
- // Randomized order works well with rayon work splitting, since we only care about
- // uniform distribution of total work size per batch (other ordering strategies might be
- // useful for optimizing disk read sizes and buffers usage in a single IO queue).
- let storages = AccountStoragesOrderer::with_random_order(storages);
- let mut lt_hash = storages
- .par_iter()
- .fold(LtHash::identity, |mut accum, storage| {
- // Function is calculating the accounts_lt_hash from all accounts in the
- // storages as of startup_slot. This means that any accounts marked obsolete at a
- // slot newer than startup_slot should be included in the accounts_lt_hash
- let obsolete_accounts = storage.get_obsolete_accounts(Some(startup_slot));
- storage
- .accounts
- .scan_accounts(|offset, account| {
- // Obsolete accounts were not included in the original hash, so they should not be added here
- if !obsolete_accounts.contains(&(offset, account.data.len())) {
- let account_lt_hash = Self::lt_hash_account(&account, account.pubkey());
- accum.mix_in(&account_lt_hash.0);
- }
- })
- .expect("must scan accounts storage");
- accum
- })
- .reduce(LtHash::identity, |mut accum, elem| {
- accum.mix_in(&elem);
- accum
- });
-
- if self.mark_obsolete_accounts {
- // If `mark_obsolete_accounts` is true, then none if the duplicate accounts were
- // included in the lt_hash, and do not need to be mixed out.
- // The duplicates_lt_hash should be the default value.
- assert_eq!(*duplicates_lt_hash, DuplicatesLtHash::default());
- }
- lt_hash.mix_out(&duplicates_lt_hash.0);
-
- AccountsLtHash(lt_hash)
- }
-
/// Calculates the capitalization
///
/// Panics if capitalization overflows a u64.
@@ -5834,6 +5455,9 @@ impl AccountsDb {
}
}
+ /// Updates the accounts index with the given `infos` and `accounts`.
+ /// Returns a vector of `SlotList` containing the reclaims for each batch processed.
+ /// Each element of the returned vector is guaranteed to be non-empty.
fn update_index<'a>(
&self,
infos: Vec,
@@ -5841,12 +5465,20 @@ impl AccountsDb {
reclaim: UpsertReclaim,
update_index_thread_selection: UpdateIndexThreadSelection,
thread_pool: &ThreadPool,
- ) -> SlotList {
+ ) -> Vec> {
let target_slot = accounts.target_slot();
let len = std::cmp::min(accounts.len(), infos.len());
+ // If reclaiming old slots, ensure the target slot is a root
+ // Having an unrooted slot reclaim a rooted version of a slot
+ // could lead to index corruption if the unrooted version is
+ // discarded
+ if reclaim == UpsertReclaim::ReclaimOldSlots {
+ assert!(target_slot <= self.accounts_index.max_root_inclusive());
+ }
+
let update = |start, end| {
- let mut reclaims = Vec::with_capacity((end - start) / 2);
+ let mut reclaims = ReclaimsSlotList::with_capacity((end - start) / 2);
(start..end).for_each(|i| {
let info = infos[i];
@@ -5883,11 +5515,17 @@ impl AccountsDb {
let end = std::cmp::min(start + chunk_size, len);
update(start, end)
})
- .flatten()
- .collect::>()
+ .filter(|reclaims| !reclaims.is_empty())
+ .collect()
})
} else {
- update(0, len)
+ let reclaims = update(0, len);
+ if reclaims.is_empty() {
+ // If no reclaims, return an empty vector
+ vec![]
+ } else {
+ vec![reclaims]
+ }
}
}
@@ -6053,12 +5691,9 @@ impl AccountsDb {
(dead_slots, reclaimed_offsets)
}
- fn remove_dead_slots_metadata<'a>(
- &'a self,
- dead_slots_iter: impl Iterator
- + Clone,
- ) {
+ fn remove_dead_slots_metadata<'a>(&'a self, dead_slots_iter: impl Iterator
- ) {
let mut measure = Measure::start("remove_dead_slots_metadata-ms");
- self.clean_dead_slots_from_accounts_index(dead_slots_iter.clone());
+ self.clean_dead_slots_from_accounts_index(dead_slots_iter);
measure.stop();
inc_new_counter_info!("remove_dead_slots_metadata-ms", measure.as_ms() as usize);
}
@@ -6072,7 +5707,7 @@ impl AccountsDb {
pubkeys_removed_from_accounts_index: &'a PubkeysRemovedFromAccountsIndex,
) {
let batches = 1 + (num_pubkeys / UNREF_ACCOUNTS_BATCH_SIZE);
- self.thread_pool_clean.install(|| {
+ self.thread_pool_background.install(|| {
(0..batches).into_par_iter().for_each(|batch| {
let skip = batch * UNREF_ACCOUNTS_BATCH_SIZE;
self.accounts_index.scan(
@@ -6135,7 +5770,7 @@ impl AccountsDb {
fn clean_dead_slots_from_accounts_index<'a>(
&'a self,
- dead_slots_iter: impl Iterator
- + Clone,
+ dead_slots_iter: impl Iterator
- ,
) {
let mut accounts_index_root_stats = AccountsIndexRootsStats::default();
let mut measure = Measure::start("clean_dead_slot");
@@ -6188,7 +5823,7 @@ impl AccountsDb {
}
// get all pubkeys in all dead slots
let purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = {
- self.thread_pool_clean.install(|| {
+ self.thread_pool_background.install(|| {
stores
.into_par_iter()
.map(|store| {
@@ -6230,30 +5865,15 @@ impl AccountsDb {
.latest_accounts_index_roots_stats
.update(&accounts_index_root_stats);
- self.remove_dead_slots_metadata(dead_slots.iter());
measure.stop();
self.clean_accounts_stats
.clean_stored_dead_slots_us
.fetch_add(measure.as_us(), Ordering::Relaxed);
}
- pub fn store_cached<'a>(&self, accounts: impl StorableAccounts<'a>) {
- self.store(
- accounts,
- None,
- UpdateIndexThreadSelection::PoolWithThreshold,
- );
- }
-
- pub(crate) fn store_cached_inline_update_index<'a>(
- &self,
- accounts: impl StorableAccounts<'a>,
- transactions: Option<&'a [&'a SanitizedTransaction]>,
- ) {
- self.store(accounts, transactions, UpdateIndexThreadSelection::Inline);
- }
-
- fn store<'a>(
+ /// Stores accounts in the write cache and updates the index.
+ /// This should only be used for accounts that are unrooted (unfrozen)
+ pub(crate) fn store_accounts_unfrozen<'a>(
&self,
accounts: impl StorableAccounts<'a>,
transactions: Option<&'a [&'a SanitizedTransaction]>,
@@ -6274,196 +5894,68 @@ impl AccountsDb {
.store_total_data
.fetch_add(total_data as u64, Ordering::Relaxed);
- self.store_accounts_unfrozen(accounts, transactions, update_index_thread_selection);
+ // Store the accounts in the write cache
+ let mut store_accounts_time = Measure::start("store_accounts");
+ let infos = self.write_accounts_to_cache(accounts.target_slot(), &accounts, transactions);
+ store_accounts_time.stop();
+ self.stats
+ .store_accounts
+ .fetch_add(store_accounts_time.as_us(), Ordering::Relaxed);
+
+ // Update the index
+ let mut update_index_time = Measure::start("update_index");
+
+ self.update_index(
+ infos,
+ &accounts,
+ UpsertReclaim::PreviousSlotEntryWasCached,
+ update_index_thread_selection,
+ &self.thread_pool_foreground,
+ );
+
+ update_index_time.stop();
+ self.stats
+ .store_update_index
+ .fetch_add(update_index_time.as_us(), Ordering::Relaxed);
+ self.stats
+ .store_num_accounts
+ .fetch_add(accounts.len() as u64, Ordering::Relaxed);
self.report_store_timings();
}
- fn report_store_timings(&self) {
- if self.stats.last_store_report.should_update(1000) {
- let read_cache_stats = self.read_only_accounts_cache.get_and_reset_stats();
- datapoint_info!(
- "accounts_db_store_timings",
- (
- "hash_accounts",
- self.stats.store_hash_accounts.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "store_accounts",
- self.stats.store_accounts.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "update_index",
- self.stats.store_update_index.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "handle_reclaims",
- self.stats.store_handle_reclaims.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "append_accounts",
- self.stats.store_append_accounts.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "stakes_cache_check_and_store_us",
- self.stats
- .stakes_cache_check_and_store_us
- .swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "num_accounts",
- self.stats.store_num_accounts.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "total_data",
- self.stats.store_total_data.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "read_only_accounts_cache_entries",
- self.read_only_accounts_cache.cache_len(),
- i64
- ),
- (
- "read_only_accounts_cache_data_size",
- self.read_only_accounts_cache.data_size(),
- i64
- ),
- ("read_only_accounts_cache_hits", read_cache_stats.hits, i64),
- (
- "read_only_accounts_cache_misses",
- read_cache_stats.misses,
- i64
- ),
- (
- "read_only_accounts_cache_evicts",
- read_cache_stats.evicts,
- i64
- ),
- (
- "read_only_accounts_cache_load_us",
- read_cache_stats.load_us,
- i64
- ),
- (
- "read_only_accounts_cache_store_us",
- read_cache_stats.store_us,
- i64
- ),
- (
- "read_only_accounts_cache_evict_us",
- read_cache_stats.evict_us,
- i64
- ),
- (
- "read_only_accounts_cache_evictor_wakeup_count_all",
- read_cache_stats.evictor_wakeup_count_all,
- i64
- ),
- (
- "read_only_accounts_cache_evictor_wakeup_count_productive",
- read_cache_stats.evictor_wakeup_count_productive,
- i64
- ),
- (
- "handle_dead_keys_us",
- self.stats.handle_dead_keys_us.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "purge_exact_us",
- self.stats.purge_exact_us.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "purge_exact_count",
- self.stats.purge_exact_count.swap(0, Ordering::Relaxed),
- i64
- ),
- );
-
- datapoint_info!(
- "accounts_db_store_timings2",
- (
- "create_store_count",
- self.stats.create_store_count.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "store_get_slot_store",
- self.stats.store_get_slot_store.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "store_find_existing",
- self.stats.store_find_existing.swap(0, Ordering::Relaxed),
- i64
- ),
- (
- "dropped_stores",
- self.stats.dropped_stores.swap(0, Ordering::Relaxed),
- i64
- ),
- );
- }
- }
-
- /// Stores accounts in the write cache and updates the index.
- /// This should only be used for accounts that are unrooted (unfrozen)
- fn store_accounts_unfrozen<'a>(
+ /// Stores accounts in the storage and updates the index.
+ /// This function is intended for accounts that are rooted (frozen).
+ /// - `UpsertReclaims` is set to `IgnoreReclaims`. If the slot in `accounts` differs from the new slot,
+ /// accounts may be removed from the account index. In such cases, the caller must ensure that alive
+ /// accounts are decremented for the older storage or that the old storage is removed entirely
+ pub fn store_accounts_frozen<'a>(
&self,
accounts: impl StorableAccounts<'a>,
- transactions: Option<&'a [&'a SanitizedTransaction]>,
+ storage: &Arc,
update_index_thread_selection: UpdateIndexThreadSelection,
- ) {
- let slot = accounts.target_slot();
-
- // Store the accounts in the write cache
- let mut store_accounts_time = Measure::start("store_accounts");
- let infos = self.write_accounts_to_cache(slot, &accounts, transactions);
- store_accounts_time.stop();
- self.stats
- .store_accounts
- .fetch_add(store_accounts_time.as_us(), Ordering::Relaxed);
-
- // Update the index
- let mut update_index_time = Measure::start("update_index");
-
- self.update_index(
- infos,
- &accounts,
- UpsertReclaim::PreviousSlotEntryWasCached,
+ ) -> StoreAccountsTiming {
+ self._store_accounts_frozen(
+ accounts,
+ storage,
+ UpsertReclaim::IgnoreReclaims,
update_index_thread_selection,
- &self.thread_pool,
- );
-
- update_index_time.stop();
- self.stats
- .store_update_index
- .fetch_add(update_index_time.as_us(), Ordering::Relaxed);
- self.stats
- .store_num_accounts
- .fetch_add(accounts.len() as u64, Ordering::Relaxed);
+ )
}
/// Stores accounts in the storage and updates the index.
- /// This should only be used on accounts that are rooted (frozen)
- pub fn store_accounts_frozen<'a>(
+ /// This function is intended for accounts that are rooted (frozen).
+ /// - `UpsertReclaims` must be set to `IgnoreReclaims` at this time
+ fn _store_accounts_frozen<'a>(
&self,
accounts: impl StorableAccounts<'a>,
storage: &Arc<AccountStorageEntry>,
+ reclaim_handling: UpsertReclaim,
update_index_thread_selection: UpdateIndexThreadSelection,
) -> StoreAccountsTiming {
let slot = accounts.target_slot();
let mut store_accounts_time = Measure::start("store_accounts");
- // Flush the read cache if neccessary. This will occur during shrink or clean
+ // Flush the read cache if necessary. This will occur during shrink or clean
if self.read_only_accounts_cache.can_slot_be_in_cache(slot) {
(0..accounts.len()).for_each(|index| {
// based on the patterns of how a validator writes accounts, it is almost always the case that there is no read only cache entry
@@ -6479,13 +5971,10 @@ impl AccountsDb {
self.stats
.store_accounts
.fetch_add(store_accounts_time.as_us(), Ordering::Relaxed);
- let mut update_index_time = Measure::start("update_index");
- let reclaim = UpsertReclaim::IgnoreReclaims;
+ self.mark_zero_lamport_single_ref_accounts(&infos, storage, reclaim_handling);
- // if we are squashing a single slot, then we can expect a single dead slot
- let expected_single_dead_slot =
- (!accounts.contains_multiple_slots()).then(|| accounts.target_slot());
+ let mut update_index_time = Measure::start("update_index");
// If the cache was flushed, then because `update_index` occurs
// after the account are stored by the above `store_accounts_to`
@@ -6494,9 +5983,9 @@ impl AccountsDb {
let reclaims = self.update_index(
infos,
&accounts,
- UpsertReclaim::IgnoreReclaims,
+ reclaim_handling,
update_index_thread_selection,
- &self.thread_pool_clean,
+ &self.thread_pool_background,
);
update_index_time.stop();
@@ -6507,33 +5996,42 @@ impl AccountsDb {
.store_num_accounts
.fetch_add(accounts.len() as u64, Ordering::Relaxed);
- // A store for a single slot should:
- // 1) Only make "reclaims" for the same slot
- // 2) Should not cause any slots to be removed from the storage
- // database because
- // a) this slot has at least one account (the one being stored),
- // b)From 1) we know no other slots are included in the "reclaims"
- //
- // From 1) and 2) we guarantee passing `no_purge_stats` == None, which is
- // equivalent to asserting there will be no dead slots, is safe.
+ // If there are any reclaims then they should be handled. Reclaims affect
+ // all storages, and may result in the removal of dead storages.
let mut handle_reclaims_elapsed = 0;
- if reclaim == UpsertReclaim::PopulateReclaims {
+
+ // since reclaims only contains non-empty SlotList, we
+ // should skip handle_reclaims only when reclaims is empty. No need to
+ // check the elements of reclaims are empty.
+ if !reclaims.is_empty() {
+ let reclaims_len = reclaims.iter().map(|r| r.len()).sum::<usize>();
+ self.stats
+ .num_reclaims
+ .fetch_add(reclaims_len as u64, Ordering::Relaxed);
+ let purge_stats = PurgeStats::default();
let mut handle_reclaims_time = Measure::start("handle_reclaims");
self.handle_reclaims(
- (!reclaims.is_empty()).then(|| reclaims.iter()),
- expected_single_dead_slot,
+ reclaims.iter().flatten(),
+ None,
&HashSet::default(),
- // this callsite does NOT process dead slots
- HandleReclaims::DoNotProcessDeadSlots,
- MarkAccountsObsolete::No,
+ HandleReclaims::ProcessDeadSlots(&purge_stats),
+ MarkAccountsObsolete::Yes(slot),
);
handle_reclaims_time.stop();
handle_reclaims_elapsed = handle_reclaims_time.as_us();
+ self.stats.num_obsolete_slots_removed.fetch_add(
+ purge_stats.num_stored_slots_removed.load(Ordering::Relaxed),
+ Ordering::Relaxed,
+ );
+ self.stats.num_obsolete_bytes_removed.fetch_add(
+ purge_stats
+ .total_removed_stored_bytes
+ .load(Ordering::Relaxed),
+ Ordering::Relaxed,
+ );
self.stats
.store_handle_reclaims
.fetch_add(handle_reclaims_elapsed, Ordering::Relaxed);
- } else {
- assert!(reclaims.is_empty());
}
StoreAccountsTiming {
@@ -6543,6 +6041,296 @@ impl AccountsDb {
}
}
+ fn write_accounts_to_cache<'a, 'b>(
+ &self,
+ slot: Slot,
+ accounts_and_meta_to_store: &impl StorableAccounts<'b>,
+ txs: Option<&[&SanitizedTransaction]>,
+ ) -> Vec<AccountInfo> {
+ let mut current_write_version = if self.accounts_update_notifier.is_some() {
+ self.write_version
+ .fetch_add(accounts_and_meta_to_store.len() as u64, Ordering::AcqRel)
+ } else {
+ 0
+ };
+
+ (0..accounts_and_meta_to_store.len())
+ .map(|index| {
+ let txn = txs.map(|txs| *txs.get(index).expect("txs must be present if provided"));
+ accounts_and_meta_to_store.account_default_if_zero_lamport(index, |account| {
+ let account_shared_data = account.to_account_shared_data();
+ let pubkey = account.pubkey();
+ let account_info =
+ AccountInfo::new(StorageLocation::Cached, account.is_zero_lamport());
+
+ self.notify_account_at_accounts_update(
+ slot,
+ &account_shared_data,
+ &txn,
+ pubkey,
+ current_write_version,
+ );
+ current_write_version = current_write_version.saturating_add(1);
+
+ self.accounts_cache.store(slot, pubkey, account_shared_data);
+ account_info
+ })
+ })
+ .collect()
+ }
+
+ fn write_accounts_to_storage<'a>(
+ &self,
+ slot: Slot,
+ storage: &AccountStorageEntry,
+ accounts_and_meta_to_store: &impl StorableAccounts<'a>,
+ ) -> Vec<AccountInfo> {
+ let mut infos: Vec<AccountInfo> = Vec::with_capacity(accounts_and_meta_to_store.len());
+ let mut total_append_accounts_us = 0;
+ while infos.len() < accounts_and_meta_to_store.len() {
+ let mut append_accounts = Measure::start("append_accounts");
+ let stored_accounts_info = storage
+ .accounts
+ .write_accounts(accounts_and_meta_to_store, infos.len());
+ append_accounts.stop();
+ total_append_accounts_us += append_accounts.as_us();
+ let Some(stored_accounts_info) = stored_accounts_info else {
+ // See if an account overflows the storage in the slot.
+ let data_len = accounts_and_meta_to_store.data_len(infos.len());
+ let data_len = (data_len + STORE_META_OVERHEAD) as u64;
+ if data_len > storage.accounts.remaining_bytes() {
+ info!(
+ "write_accounts_to_storage, no space: {}, {}, {}, {}, {}",
+ storage.accounts.capacity(),
+ storage.accounts.remaining_bytes(),
+ data_len,
+ infos.len(),
+ accounts_and_meta_to_store.len()
+ );
+ let special_store_size = std::cmp::max(data_len * 2, self.file_size);
+ self.create_and_insert_store(slot, special_store_size, "large create");
+ }
+ continue;
+ };
+
+ let store_id = storage.id();
+ for (i, offset) in stored_accounts_info.offsets.iter().enumerate() {
+ infos.push(AccountInfo::new(
+ StorageLocation::AppendVec(store_id, *offset),
+ accounts_and_meta_to_store.is_zero_lamport(i),
+ ));
+ }
+ storage.add_accounts(
+ stored_accounts_info.offsets.len(),
+ stored_accounts_info.size,
+ );
+ }
+
+ self.stats
+ .store_append_accounts
+ .fetch_add(total_append_accounts_us, Ordering::Relaxed);
+
+ infos
+ }
+
+ /// Marks zero lamport single reference accounts in the storage during store_accounts
+ fn mark_zero_lamport_single_ref_accounts(
+ &self,
+ account_infos: &[AccountInfo],
+ storage: &AccountStorageEntry,
+ reclaim_handling: UpsertReclaim,
+ ) {
+ // If the reclaim handling is `ReclaimOldSlots`, then all zero lamport accounts are single
+ // ref accounts and they need to be inserted into the storages zero lamport single ref
+ // accounts list
+ // For other values of reclaim handling, there are no zero lamport single ref accounts
+ // so nothing needs to be done in this function
+ if reclaim_handling == UpsertReclaim::ReclaimOldSlots {
+ let mut add_zero_lamport_accounts = Measure::start("add_zero_lamport_accounts");
+ let mut num_zero_lamport_accounts_added = 0;
+
+ for account_info in account_infos {
+ if account_info.is_zero_lamport() {
+ storage.insert_zero_lamport_single_ref_account_offset(account_info.offset());
+ num_zero_lamport_accounts_added += 1;
+ }
+ }
+
+ // If any zero lamport accounts were added, the storage may be valid for shrinking
+ if num_zero_lamport_accounts_added > 0
+ && self.is_candidate_for_shrink(storage)
+ && Self::is_shrinking_productive(storage)
+ {
+ self.shrink_candidate_slots
+ .lock()
+ .unwrap()
+ .insert(storage.slot);
+ }
+
+ add_zero_lamport_accounts.stop();
+ self.stats
+ .add_zero_lamport_accounts_us
+ .fetch_add(add_zero_lamport_accounts.as_us(), Ordering::Relaxed);
+ self.stats
+ .num_zero_lamport_accounts_added
+ .fetch_add(num_zero_lamport_accounts_added, Ordering::Relaxed);
+ }
+ }
+
+ fn report_store_timings(&self) {
+ if self.stats.last_store_report.should_update(1000) {
+ let read_cache_stats = self.read_only_accounts_cache.get_and_reset_stats();
+ datapoint_info!(
+ "accounts_db_store_timings",
+ (
+ "store_accounts",
+ self.stats.store_accounts.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "update_index",
+ self.stats.store_update_index.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "handle_reclaims",
+ self.stats.store_handle_reclaims.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "append_accounts",
+ self.stats.store_append_accounts.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "stakes_cache_check_and_store_us",
+ self.stats
+ .stakes_cache_check_and_store_us
+ .swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "num_accounts",
+ self.stats.store_num_accounts.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "total_data",
+ self.stats.store_total_data.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "num_reclaims",
+ self.stats.num_reclaims.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "read_only_accounts_cache_entries",
+ self.read_only_accounts_cache.cache_len(),
+ i64
+ ),
+ (
+ "read_only_accounts_cache_data_size",
+ self.read_only_accounts_cache.data_size(),
+ i64
+ ),
+ ("read_only_accounts_cache_hits", read_cache_stats.hits, i64),
+ (
+ "read_only_accounts_cache_misses",
+ read_cache_stats.misses,
+ i64
+ ),
+ (
+ "read_only_accounts_cache_evicts",
+ read_cache_stats.evicts,
+ i64
+ ),
+ (
+ "read_only_accounts_cache_load_us",
+ read_cache_stats.load_us,
+ i64
+ ),
+ (
+ "read_only_accounts_cache_store_us",
+ read_cache_stats.store_us,
+ i64
+ ),
+ (
+ "read_only_accounts_cache_evict_us",
+ read_cache_stats.evict_us,
+ i64
+ ),
+ (
+ "read_only_accounts_cache_evictor_wakeup_count_all",
+ read_cache_stats.evictor_wakeup_count_all,
+ i64
+ ),
+ (
+ "read_only_accounts_cache_evictor_wakeup_count_productive",
+ read_cache_stats.evictor_wakeup_count_productive,
+ i64
+ ),
+ (
+ "handle_dead_keys_us",
+ self.stats.handle_dead_keys_us.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "purge_exact_us",
+ self.stats.purge_exact_us.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "purge_exact_count",
+ self.stats.purge_exact_count.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "num_obsolete_slots_removed",
+ self.stats
+ .num_obsolete_slots_removed
+ .swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "num_obsolete_bytes_removed",
+ self.stats
+ .num_obsolete_bytes_removed
+ .swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "add_zero_lamport_accounts_us",
+ self.stats
+ .add_zero_lamport_accounts_us
+ .swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "num_zero_lamport_accounts_added",
+ self.stats
+ .num_zero_lamport_accounts_added
+ .swap(0, Ordering::Relaxed),
+ i64
+ ),
+ );
+
+ datapoint_info!(
+ "accounts_db_store_timings2",
+ (
+ "create_store_count",
+ self.stats.create_store_count.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ (
+ "dropped_stores",
+ self.stats.dropped_stores.swap(0, Ordering::Relaxed),
+ i64
+ ),
+ );
+ }
+ }
+
pub fn add_root(&self, slot: Slot) -> AccountsAddRootTiming {
let mut index_time = Measure::start("index_add_root");
self.accounts_index.add_root(slot);
@@ -6550,22 +6338,10 @@ impl AccountsDb {
let mut cache_time = Measure::start("cache_add_root");
self.accounts_cache.add_root(slot);
cache_time.stop();
- let mut store_time = Measure::start("store_add_root");
- // We would not expect this slot to be shrinking right now, but other slots may be.
- // But, even if it was, we would just mark a store id as dirty unnecessarily and that is ok.
- // So, allow shrinking to be in progress.
- if let Some(store) = self
- .storage
- .get_slot_storage_entry_shrinking_in_progress_ok(slot)
- {
- self.dirty_stores.insert(slot, store);
- }
- store_time.stop();
AccountsAddRootTiming {
index_us: index_time.as_us(),
cache_us: cache_time.as_us(),
- store_us: store_time.as_us(),
}
}
@@ -6596,9 +6372,10 @@ impl AccountsDb {
*self.latest_full_snapshot_slot.lock_write() = Some(slot);
}
- fn generate_index_for_slot(
+ fn generate_index_for_slot<'a>(
&self,
- storage: &AccountStorageEntry,
+ reader: &mut impl RequiredLenBufFileRead<'a>,
+ storage: &'a AccountStorageEntry,
slot: Slot,
store_id: AccountsFileId,
storage_info: &StorageSizeAndCountMap,
@@ -6606,86 +6383,97 @@ impl AccountsDb {
if storage.accounts.get_account_data_lens(&[0]).is_empty() {
return SlotIndexGenerationInfo::default();
}
- let secondary = !self.account_indexes.is_empty();
let mut accounts_data_len = 0;
let mut stored_size_alive = 0;
let mut zero_lamport_pubkeys = vec![];
+ let mut zero_lamport_offsets = vec![];
let mut all_accounts_are_zero_lamports = true;
+ let mut slot_lt_hash = SlotLtHash::default();
+ let mut keyed_account_infos = vec![];
+
+ let geyser_notifier = self
+ .accounts_update_notifier
+ .as_ref()
+ .filter(|notifier| notifier.snapshot_notifications_enabled());
+
+ // If geyser notifications at startup from snapshot are enabled, we need to pass in a
+ // write version for each account notification. This value does not need to be
+ // globally unique, as geyser plugins also receive the slot number. We only need to
+ // ensure that more recent accounts have a higher write version than older accounts.
+ // Even more relaxed, we really only need to have different write versions if there are
+ // multiple versions of the same account in a single storage, which is not allowed.
+ //
+ // Since we scan the storage from oldest to newest, we can simply increment a local
+ // counter per account and use that for the write version.
+ let mut write_version_for_geyser = 0;
- let (insert_time_us, generate_index_results) = {
- let mut keyed_account_infos = vec![];
- // this closure is the shared code when scanning the storage
- let mut itemizer = |info: IndexInfo| {
- stored_size_alive += info.stored_size_aligned;
- if info.index_info.lamports > 0 {
- accounts_data_len += info.index_info.data_len;
+ storage
+ .accounts
+ .scan_accounts(reader, |offset, account| {
+ let data_len = account.data.len();
+ stored_size_alive += storage.accounts.calculate_stored_size(data_len);
+ let is_account_zero_lamport = account.is_zero_lamport();
+ if !is_account_zero_lamport {
+ accounts_data_len += data_len as u64;
all_accounts_are_zero_lamports = false;
} else {
- // zero lamport accounts
- zero_lamport_pubkeys.push(info.index_info.pubkey);
+ // With obsolete accounts enabled, all zero lamport accounts
+ // are obsolete or single ref by the end of index generation
+ // Store the offsets here
+ if self.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled {
+ zero_lamport_offsets.push(offset);
+ }
+ zero_lamport_pubkeys.push(*account.pubkey);
}
keyed_account_infos.push((
- info.index_info.pubkey,
+ *account.pubkey,
AccountInfo::new(
- StorageLocation::AppendVec(store_id, info.index_info.offset), // will never be cached
- info.index_info.is_zero_lamport(),
+ StorageLocation::AppendVec(store_id, offset), // will never be cached
+ is_account_zero_lamport,
),
));
- };
- if secondary {
- // WITH secondary indexes -- scan accounts WITH account data
- storage.accounts.scan_accounts(|offset, account| {
- let data_len = account.data.len() as u64;
- let stored_size_aligned =
- storage.accounts.calculate_stored_size(data_len as usize);
- let info = IndexInfo {
- stored_size_aligned,
- index_info: IndexInfoInner {
- offset,
- pubkey: *account.pubkey,
- lamports: account.lamports,
- data_len,
- },
- };
- itemizer(info);
+ if !self.account_indexes.is_empty() {
self.accounts_index.update_secondary_indexes(
account.pubkey,
&account,
&self.account_indexes,
);
- })
- } else {
- // withOUT secondary indexes -- scan accounts withOUT account data
- storage
- .accounts
- .scan_accounts_without_data(|offset, account| {
- let data_len = account.data_len as u64;
- let stored_size_aligned =
- storage.accounts.calculate_stored_size(data_len as usize);
- let info = IndexInfo {
- stored_size_aligned,
- index_info: IndexInfoInner {
- offset,
- pubkey: *account.pubkey,
- lamports: account.lamports,
- data_len,
- },
- };
- itemizer(info);
- })
- }
+ }
+
+ let account_lt_hash = Self::lt_hash_account(&account, account.pubkey());
+ slot_lt_hash.0.mix_in(&account_lt_hash.0);
+
+ if let Some(geyser_notifier) = geyser_notifier {
+ debug_assert!(geyser_notifier.snapshot_notifications_enabled());
+ let account_for_geyser = AccountForGeyser {
+ pubkey: account.pubkey(),
+ lamports: account.lamports(),
+ owner: account.owner(),
+ executable: account.executable(),
+ rent_epoch: account.rent_epoch(),
+ data: account.data(),
+ };
+ geyser_notifier.notify_account_restore_from_snapshot(
+ slot,
+ write_version_for_geyser,
+ &account_for_geyser,
+ );
+ write_version_for_geyser += 1;
+ }
+ })
.expect("must scan accounts storage");
- self.accounts_index
- .insert_new_if_missing_into_primary_index(slot, keyed_account_infos)
- };
+
+ let (insert_time_us, insert_info) = self
+ .accounts_index
+ .insert_new_if_missing_into_primary_index(slot, keyed_account_infos);
{
// second, collect into the shared DashMap once we've figured out all the info per store_id
let mut info = storage_info.entry(store_id).or_default();
info.stored_size += stored_size_alive;
- info.count += generate_index_results.count;
+ info.count += insert_info.count;
// sanity check that stored_size is not larger than the u64 aligned size of the accounts files.
// Note that the stored_size is aligned, so it can be larger than the size of the accounts file.
@@ -6707,15 +6495,25 @@ impl AccountsDb {
.insert(slot, zero_lamport_pubkeys.clone());
assert!(old.is_none());
}
+
+ // If obsolete accounts are enabled, add them as single ref accounts here
+ // to avoid having to revisit them later
+ // This is safe with obsolete accounts as all zero lamport accounts will be single ref
+ // or obsolete by the end of index generation
+ if self.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled {
+ storage.batch_insert_zero_lamport_single_ref_account_offsets(&zero_lamport_offsets);
+ zero_lamport_pubkeys = Vec::new();
+ }
SlotIndexGenerationInfo {
insert_time_us,
- num_accounts: generate_index_results.count as u64,
+ num_accounts: insert_info.count as u64,
accounts_data_len,
zero_lamport_pubkeys,
all_accounts_are_zero_lamports,
- num_did_not_exist: generate_index_results.num_did_not_exist,
- num_existed_in_mem: generate_index_results.num_existed_in_mem,
- num_existed_on_disk: generate_index_results.num_existed_on_disk,
+ num_did_not_exist: insert_info.num_did_not_exist,
+ num_existed_in_mem: insert_info.num_existed_in_mem,
+ num_existed_on_disk: insert_info.num_existed_on_disk,
+ slot_lt_hash,
}
}
@@ -6725,395 +6523,390 @@ impl AccountsDb {
verify: bool,
) -> IndexGenerationInfo {
let mut total_time = Measure::start("generate_index");
- let mut slots = self.storage.all_slots();
- slots.sort_unstable();
+
+ let mut storages = self.storage.all_storages();
+ storages.sort_unstable_by_key(|storage| storage.slot);
if let Some(limit) = limit_load_slot_count_from_snapshot {
- slots.truncate(limit); // get rid of the newer slots and keep just the older
+ storages.truncate(limit); // get rid of the newer slots and keep just the older
}
- let accounts_data_len = AtomicU64::new(0);
+ let num_storages = storages.len();
- let zero_lamport_pubkeys = Mutex::new(HashSet::new());
- let mut outer_duplicates_lt_hash = None;
-
- // pass == 0 always runs and generates the index
- // pass == 1 only runs if verify == true.
- // verify checks that all the expected items are in the accounts index and measures how long it takes to look them all up
- let passes = if verify { 2 } else { 1 };
- for pass in 0..passes {
- if pass == 0 {
- self.accounts_index
- .set_startup(Startup::StartupWithExtraThreads);
+ self.accounts_index
+ .set_startup(Startup::StartupWithExtraThreads);
+ let storage_info = StorageSizeAndCountMap::default();
+
+ /// Accumulator for the values produced while generating the index
+ #[derive(Debug)]
+ struct IndexGenerationAccumulator {
+ insert_us: u64,
+ num_accounts: u64,
+ accounts_data_len: u64,
+ zero_lamport_pubkeys: Vec<Pubkey>,
+ all_accounts_are_zero_lamports_slots: u64,
+ all_zeros_slots: Vec<(Slot, Arc<AccountStorageEntry>)>,
+ num_did_not_exist: u64,
+ num_existed_in_mem: u64,
+ num_existed_on_disk: u64,
+ lt_hash: LtHash,
+ }
+ impl IndexGenerationAccumulator {
+ const fn new() -> Self {
+ Self {
+ insert_us: 0,
+ num_accounts: 0,
+ accounts_data_len: 0,
+ zero_lamport_pubkeys: Vec::new(),
+ all_accounts_are_zero_lamports_slots: 0,
+ all_zeros_slots: Vec::new(),
+ num_did_not_exist: 0,
+ num_existed_in_mem: 0,
+ num_existed_on_disk: 0,
+ lt_hash: LtHash::identity(),
+ }
}
- let storage_info = StorageSizeAndCountMap::default();
- let total_processed_slots_across_all_threads = AtomicU64::new(0);
- let outer_slots_len = slots.len();
- let threads = num_cpus::get();
- let chunk_size = (outer_slots_len / (std::cmp::max(1, threads.saturating_sub(1)))) + 1; // approximately 400k slots in a snapshot
- let mut index_time = Measure::start("index");
- let insertion_time_us = AtomicU64::new(0);
- let total_including_duplicates = AtomicU64::new(0);
- let all_accounts_are_zero_lamports_slots = AtomicU64::new(0);
- let mut all_zeros_slots = Mutex::new(Vec::<(Slot, Arc<AccountStorageEntry>)>::new());
- let scan_time: u64 = slots
- .par_chunks(chunk_size)
- .map(|slots| {
- let mut log_status = MultiThreadProgress::new(
- &total_processed_slots_across_all_threads,
- 2,
- outer_slots_len as u64,
- );
- let mut scan_time_sum = 0;
- let mut all_accounts_are_zero_lamports_slots_inner = 0;
- let mut all_zeros_slots_inner = vec![];
- let mut local_zero_lamport_pubkeys = Vec::new();
- let mut insert_time_sum = 0;
- let mut total_including_duplicates_sum = 0;
- let mut accounts_data_len_sum = 0;
- let mut local_num_did_not_exist = 0;
- let mut local_num_existed_in_mem = 0;
- let mut local_num_existed_on_disk = 0;
- for (index, slot) in slots.iter().enumerate() {
- let mut scan_time = Measure::start("scan");
- log_status.report(index as u64);
- let Some(storage) = self.storage.get_slot_storage_entry(*slot) else {
- // no storage at this slot, no information to pull out
- continue;
- };
- let store_id = storage.id();
-
- scan_time.stop();
- scan_time_sum += scan_time.as_us();
-
- let insert_us = if pass == 0 {
- // generate index
- self.maybe_throttle_index_generation();
- let SlotIndexGenerationInfo {
- insert_time_us: insert_us,
- num_accounts: total_this_slot,
- accounts_data_len: accounts_data_len_this_slot,
- zero_lamport_pubkeys: mut zero_lamport_pubkeys_this_slot,
- all_accounts_are_zero_lamports,
- num_did_not_exist,
- num_existed_in_mem,
- num_existed_on_disk,
- } = self.generate_index_for_slot(
- &storage,
- *slot,
- store_id,
- &storage_info,
- );
+ fn accumulate(&mut self, other: Self) {
+ self.insert_us += other.insert_us;
+ self.num_accounts += other.num_accounts;
+ self.accounts_data_len += other.accounts_data_len;
+ self.zero_lamport_pubkeys.extend(other.zero_lamport_pubkeys);
+ self.all_accounts_are_zero_lamports_slots +=
+ other.all_accounts_are_zero_lamports_slots;
+ self.all_zeros_slots.extend(other.all_zeros_slots);
+ self.num_did_not_exist += other.num_did_not_exist;
+ self.num_existed_in_mem += other.num_existed_in_mem;
+ self.num_existed_on_disk += other.num_existed_on_disk;
+ self.lt_hash.mix_in(&other.lt_hash);
+ }
+ }
- local_num_did_not_exist += num_did_not_exist;
- local_num_existed_in_mem += num_existed_in_mem;
- local_num_existed_on_disk += num_existed_on_disk;
- total_including_duplicates_sum += total_this_slot;
- accounts_data_len_sum += accounts_data_len_this_slot;
- if all_accounts_are_zero_lamports {
- all_accounts_are_zero_lamports_slots_inner += 1;
- all_zeros_slots_inner.push((*slot, Arc::clone(&storage)));
+ let mut total_accum = IndexGenerationAccumulator::new();
+ let storages_orderer =
+ AccountStoragesOrderer::with_random_order(&storages).into_concurrent_consumer();
+ let exit_logger = AtomicBool::new(false);
+ let num_processed = AtomicU64::new(0);
+ let num_threads = num_cpus::get();
+ let mut index_time = Measure::start("index");
+ thread::scope(|s| {
+ let thread_handles = (0..num_threads)
+ .map(|i| {
+ thread::Builder::new()
+ .name(format!("solGenIndex{i:02}"))
+ .spawn_scoped(s, || {
+ let mut thread_accum = IndexGenerationAccumulator::new();
+ let mut reader = append_vec::new_scan_accounts_reader();
+ while let Some(next_item) = storages_orderer.next() {
+ self.maybe_throttle_index_generation();
+ let storage = next_item.storage;
+ let store_id = storage.id();
+ let slot = storage.slot();
+ let slot_info = self.generate_index_for_slot(
+ &mut reader,
+ storage,
+ slot,
+ store_id,
+ &storage_info,
+ );
+ thread_accum.insert_us += slot_info.insert_time_us;
+ thread_accum.num_accounts += slot_info.num_accounts;
+ thread_accum.accounts_data_len += slot_info.accounts_data_len;
+ thread_accum
+ .zero_lamport_pubkeys
+ .extend(slot_info.zero_lamport_pubkeys);
+ if slot_info.all_accounts_are_zero_lamports {
+ thread_accum.all_accounts_are_zero_lamports_slots += 1;
+ thread_accum.all_zeros_slots.push((
+ slot,
+ Arc::clone(&storages[next_item.original_index]),
+ ));
+ }
+ thread_accum.num_did_not_exist += slot_info.num_did_not_exist;
+ thread_accum.num_existed_in_mem += slot_info.num_existed_in_mem;
+ thread_accum.num_existed_on_disk += slot_info.num_existed_on_disk;
+ thread_accum.lt_hash.mix_in(&slot_info.slot_lt_hash.0);
+ num_processed.fetch_add(1, Ordering::Relaxed);
}
- local_zero_lamport_pubkeys.append(&mut zero_lamport_pubkeys_this_slot);
-
- insert_us
- } else {
- // verify index matches expected and measure the time to get all items
- assert!(verify);
- let mut lookup_time = Measure::start("lookup_time");
- storage
- .accounts
- .scan_accounts_without_data(|offset, account| {
- let key = account.pubkey();
- let index_entry = self.accounts_index.get_cloned(key).unwrap();
- let slot_list = index_entry.slot_list.read().unwrap();
- let mut count = 0;
- for (slot2, account_info2) in slot_list.iter() {
- if slot2 == slot {
- count += 1;
- let ai = AccountInfo::new(
- StorageLocation::AppendVec(store_id, offset), // will never be cached
- account.is_zero_lamport(),
- );
- assert_eq!(&ai, account_info2);
- }
- }
- assert_eq!(1, count);
- })
- .expect("must scan accounts storage");
- lookup_time.stop();
- lookup_time.as_us()
- };
- insert_time_sum += insert_us;
- }
-
- if pass == 0 {
- let mut zero_lamport_pubkeys_lock = zero_lamport_pubkeys.lock().unwrap();
- zero_lamport_pubkeys_lock.reserve(local_zero_lamport_pubkeys.len());
- zero_lamport_pubkeys_lock.extend(local_zero_lamport_pubkeys.into_iter());
- drop(zero_lamport_pubkeys_lock);
-
- // This thread has finished processing its chunk of slots.
- // Update the index stats now.
- let index_stats = self.accounts_index.bucket_map_holder_stats();
-
- // stats for inserted entries that previously did *not* exist
- index_stats.inc_insert_count(local_num_did_not_exist);
- index_stats.add_mem_count(local_num_did_not_exist as usize);
-
- // stats for inserted entries that previous did exist *in-mem*
- index_stats
- .entries_from_mem
- .fetch_add(local_num_existed_in_mem, Ordering::Relaxed);
- index_stats
- .updates_in_mem
- .fetch_add(local_num_existed_in_mem, Ordering::Relaxed);
-
- // stats for inserted entries that previously did exist *on-disk*
- index_stats.add_mem_count(local_num_existed_on_disk as usize);
- index_stats
- .entries_missing
- .fetch_add(local_num_existed_on_disk, Ordering::Relaxed);
- index_stats
- .updates_in_mem
- .fetch_add(local_num_existed_on_disk, Ordering::Relaxed);
- }
-
- all_accounts_are_zero_lamports_slots.fetch_add(
- all_accounts_are_zero_lamports_slots_inner,
- Ordering::Relaxed,
- );
- all_zeros_slots
- .lock()
- .unwrap()
- .append(&mut all_zeros_slots_inner);
- insertion_time_us.fetch_add(insert_time_sum, Ordering::Relaxed);
- total_including_duplicates
- .fetch_add(total_including_duplicates_sum, Ordering::Relaxed);
- accounts_data_len.fetch_add(accounts_data_len_sum, Ordering::Relaxed);
- scan_time_sum
+ thread_accum
+ })
})
- .sum();
- index_time.stop();
-
- let mut index_flush_us = 0;
- let total_duplicate_slot_keys = AtomicU64::default();
- let mut populate_duplicate_keys_us = 0;
- let total_num_unique_duplicate_keys = AtomicU64::default();
-
- // outer vec is accounts index bin (determined by pubkey value)
- // inner vec is the pubkeys within that bin that are present in > 1 slot
- let unique_pubkeys_by_bin = Mutex::new(Vec::<Vec<Pubkey>>::default());
- if pass == 0 {
- // tell accounts index we are done adding the initial accounts at startup
- let mut m = Measure::start("accounts_index_idle_us");
- self.accounts_index.set_startup(Startup::Normal);
- m.stop();
- index_flush_us = m.as_us();
-
- populate_duplicate_keys_us = measure_us!({
- // this has to happen before visit_duplicate_pubkeys_during_startup below
- // get duplicate keys from acct idx. We have to wait until we've finished flushing.
- self.accounts_index
- .populate_and_retrieve_duplicate_keys_from_startup(|slot_keys| {
- total_duplicate_slot_keys
- .fetch_add(slot_keys.len() as u64, Ordering::Relaxed);
- let unique_keys =
- HashSet::<Pubkey>::from_iter(slot_keys.iter().map(|(_, key)| *key));
- for (slot, key) in slot_keys {
- self.uncleaned_pubkeys.entry(slot).or_default().push(key);
- }
- let unique_pubkeys_by_bin_inner =
- unique_keys.into_iter().collect::<Vec<_>>();
- total_num_unique_duplicate_keys.fetch_add(
- unique_pubkeys_by_bin_inner.len() as u64,
- Ordering::Relaxed,
+ .collect::<Result<Vec<_>, _>>()
+ .expect("spawn threads");
+ let logger_thread_handle = thread::Builder::new()
+ .name("solGenIndexLog".to_string())
+ .spawn_scoped(s, || {
+ let mut last_update = Instant::now();
+ loop {
+ if exit_logger.load(Ordering::Relaxed) {
+ break;
+ }
+ let num_processed = num_processed.load(Ordering::Relaxed);
+ if num_processed == num_storages as u64 {
+ info!("generating index: processed all slots");
+ break;
+ }
+ let now = Instant::now();
+ if now - last_update > Duration::from_secs(2) {
+ info!(
+ "generating index: processed {num_processed}/{num_storages} \
+ slots..."
);
- // does not matter that this is not ordered by slot
- unique_pubkeys_by_bin
- .lock()
- .unwrap()
- .push(unique_pubkeys_by_bin_inner);
- });
+ last_update = now;
+ }
+ thread::sleep(Duration::from_millis(500))
+ }
})
- .1;
+ .expect("spawn thread");
+ for thread_handle in thread_handles {
+ let Ok(thread_accum) = thread_handle.join() else {
+ exit_logger.store(true, Ordering::Relaxed);
+ panic!("index generation failed");
+ };
+ total_accum.accumulate(thread_accum);
}
- let unique_pubkeys_by_bin = unique_pubkeys_by_bin.into_inner().unwrap();
-
- let mut timings = GenerateIndexTimings {
- index_flush_us,
- scan_time,
- index_time: index_time.as_us(),
- insertion_time_us: insertion_time_us.load(Ordering::Relaxed),
- total_duplicate_slot_keys: total_duplicate_slot_keys.load(Ordering::Relaxed),
- total_num_unique_duplicate_keys: total_num_unique_duplicate_keys
- .load(Ordering::Relaxed),
- populate_duplicate_keys_us,
- total_including_duplicates: total_including_duplicates.load(Ordering::Relaxed),
- total_slots: slots.len() as u64,
- all_accounts_are_zero_lamports_slots: all_accounts_are_zero_lamports_slots
- .load(Ordering::Relaxed),
- ..GenerateIndexTimings::default()
- };
+ // Make sure to join the logger thread *after* the main threads.
+ // This way, if a main thread errors, we won't spin indefinitely
+ // waiting for the logger thread to finish (it never will).
+ logger_thread_handle.join().expect("join thread");
+ });
+ index_time.stop();
- if pass == 0 {
- #[derive(Debug, Default)]
- struct DuplicatePubkeysVisitedInfo {
- accounts_data_len_from_duplicates: u64,
- num_duplicate_accounts: u64,
- duplicates_lt_hash: Option>,
- }
- impl DuplicatePubkeysVisitedInfo {
- fn reduce(mut self, other: Self) -> Self {
- self.accounts_data_len_from_duplicates +=
- other.accounts_data_len_from_duplicates;
- self.num_duplicate_accounts += other.num_duplicate_accounts;
-
- match (
- self.duplicates_lt_hash.is_some(),
- other.duplicates_lt_hash.is_some(),
- ) {
- (true, true) => {
- // SAFETY: We just checked that both values are Some
- self.duplicates_lt_hash
- .as_mut()
- .unwrap()
- .0
- .mix_in(&other.duplicates_lt_hash.as_ref().unwrap().0);
- }
- (true, false) => {
- // nothing to do; `other` doesn't have a duplicates lt hash
- }
- (false, true) => {
- // `self` doesn't have a duplicates lt hash, so pilfer from `other`
- self.duplicates_lt_hash = other.duplicates_lt_hash;
- }
- (false, false) => {
- // nothing to do; no duplicates lt hash at all
+ {
+ // Update the index stats now.
+ let index_stats = self.accounts_index.bucket_map_holder_stats();
+
+ // stats for inserted entries that previously did *not* exist
+ index_stats.inc_insert_count(total_accum.num_did_not_exist);
+ index_stats.add_mem_count(total_accum.num_did_not_exist as usize);
+
+ // stats for inserted entries that previously did exist *in-mem*
+ index_stats
+ .entries_from_mem
+ .fetch_add(total_accum.num_existed_in_mem, Ordering::Relaxed);
+ index_stats
+ .updates_in_mem
+ .fetch_add(total_accum.num_existed_in_mem, Ordering::Relaxed);
+
+ // stats for inserted entries that previously did exist *on-disk*
+ index_stats.add_mem_count(total_accum.num_existed_on_disk as usize);
+ index_stats
+ .entries_missing
+ .fetch_add(total_accum.num_existed_on_disk, Ordering::Relaxed);
+ index_stats
+ .updates_in_mem
+ .fetch_add(total_accum.num_existed_on_disk, Ordering::Relaxed);
+ }
+
+ if let Some(geyser_notifier) = &self.accounts_update_notifier {
+ // We've finished scanning all the storages, and have thus sent all the
+ // account notifications. Now, let the geyser plugins know we're done.
+ geyser_notifier.notify_end_of_restore_from_snapshot();
+ }
+
+ if verify {
+ info!("Verifying index...");
+ let start = Instant::now();
+ storages.par_iter().for_each(|storage| {
+ let store_id = storage.id();
+ let slot = storage.slot();
+ storage
+ .accounts
+ .scan_accounts_without_data(|offset, account| {
+ let key = account.pubkey();
+ let index_entry = self.accounts_index.get_cloned(key).unwrap();
+ let slot_list = index_entry.slot_list.read().unwrap();
+ let mut count = 0;
+ for (slot2, account_info2) in slot_list.iter() {
+ if *slot2 == slot {
+ count += 1;
+ let ai = AccountInfo::new(
+ StorageLocation::AppendVec(store_id, offset), // will never be cached
+ account.is_zero_lamport(),
+ );
+ assert_eq!(&ai, account_info2);
}
}
- self
+ assert_eq!(1, count);
+ })
+ .expect("must scan accounts storage");
+ });
+ info!("Verifying index... Done in {:?}", start.elapsed());
+ }
+
+ let total_duplicate_slot_keys = AtomicU64::default();
+ let total_num_unique_duplicate_keys = AtomicU64::default();
+
+ // outer vec is accounts index bin (determined by pubkey value)
+ // inner vec is the pubkeys within that bin that are present in > 1 slot
+ let unique_pubkeys_by_bin = Mutex::new(Vec::>::default());
+ // tell accounts index we are done adding the initial accounts at startup
+ let mut m = Measure::start("accounts_index_idle_us");
+ self.accounts_index.set_startup(Startup::Normal);
+ m.stop();
+ let index_flush_us = m.as_us();
+
+ let populate_duplicate_keys_us = measure_us!({
+ // this has to happen before visit_duplicate_pubkeys_during_startup below
+ // get duplicate keys from acct idx. We have to wait until we've finished flushing.
+ self.accounts_index
+ .populate_and_retrieve_duplicate_keys_from_startup(|slot_keys| {
+ total_duplicate_slot_keys.fetch_add(slot_keys.len() as u64, Ordering::Relaxed);
+ let unique_keys =
+ HashSet::::from_iter(slot_keys.iter().map(|(_, key)| *key));
+ for (slot, key) in slot_keys {
+ self.uncleaned_pubkeys.entry(slot).or_default().push(key);
}
- }
+ let unique_pubkeys_by_bin_inner = unique_keys.into_iter().collect::>();
+ total_num_unique_duplicate_keys
+ .fetch_add(unique_pubkeys_by_bin_inner.len() as u64, Ordering::Relaxed);
+ // does not matter that this is not ordered by slot
+ unique_pubkeys_by_bin
+ .lock()
+ .unwrap()
+ .push(unique_pubkeys_by_bin_inner);
+ });
+ })
+ .1;
+ let unique_pubkeys_by_bin = unique_pubkeys_by_bin.into_inner().unwrap();
+
+ let mut timings = GenerateIndexTimings {
+ index_flush_us,
+ scan_time: 0,
+ index_time: index_time.as_us(),
+ insertion_time_us: total_accum.insert_us,
+ total_duplicate_slot_keys: total_duplicate_slot_keys.load(Ordering::Relaxed),
+ total_num_unique_duplicate_keys: total_num_unique_duplicate_keys
+ .load(Ordering::Relaxed),
+ populate_duplicate_keys_us,
+ total_including_duplicates: total_accum.num_accounts,
+ total_slots: num_storages as u64,
+ all_accounts_are_zero_lamports_slots: total_accum.all_accounts_are_zero_lamports_slots,
+ ..GenerateIndexTimings::default()
+ };
- let zero_lamport_pubkeys_to_visit =
- std::mem::take(&mut *zero_lamport_pubkeys.lock().unwrap());
- let (num_zero_lamport_single_refs, visit_zero_lamports_us) =
- measure_us!(self
- .visit_zero_lamport_pubkeys_during_startup(&zero_lamport_pubkeys_to_visit));
- timings.visit_zero_lamports_us = visit_zero_lamports_us;
- timings.num_zero_lamport_single_refs = num_zero_lamport_single_refs;
-
- // subtract data.len() from accounts_data_len for all old accounts that are in the index twice
- let mut accounts_data_len_dedup_timer =
- Measure::start("handle accounts data len duplicates");
- let DuplicatePubkeysVisitedInfo {
- accounts_data_len_from_duplicates,
- num_duplicate_accounts,
- duplicates_lt_hash,
- } = unique_pubkeys_by_bin
- .par_iter()
- .fold(
- DuplicatePubkeysVisitedInfo::default,
- |accum, pubkeys_by_bin| {
- let intermediate = pubkeys_by_bin
- .par_chunks(4096)
- .fold(DuplicatePubkeysVisitedInfo::default, |accum, pubkeys| {
- let (
- accounts_data_len_from_duplicates,
- accounts_duplicates_num,
- duplicates_lt_hash,
- ) = self
- .visit_duplicate_pubkeys_during_startup(pubkeys, &timings);
- let intermediate = DuplicatePubkeysVisitedInfo {
- accounts_data_len_from_duplicates,
- num_duplicate_accounts: accounts_duplicates_num,
- duplicates_lt_hash,
- };
- DuplicatePubkeysVisitedInfo::reduce(accum, intermediate)
- })
- .reduce(
- DuplicatePubkeysVisitedInfo::default,
- DuplicatePubkeysVisitedInfo::reduce,
- );
+ #[derive(Debug, Default)]
+ struct DuplicatePubkeysVisitedInfo {
+ accounts_data_len_from_duplicates: u64,
+ num_duplicate_accounts: u64,
+ duplicates_lt_hash: Box,
+ }
+ impl DuplicatePubkeysVisitedInfo {
+ fn reduce(mut self, other: Self) -> Self {
+ self.accounts_data_len_from_duplicates += other.accounts_data_len_from_duplicates;
+ self.num_duplicate_accounts += other.num_duplicate_accounts;
+ self.duplicates_lt_hash
+ .0
+ .mix_in(&other.duplicates_lt_hash.0);
+ self
+ }
+ }
+
+ let (num_zero_lamport_single_refs, visit_zero_lamports_us) = measure_us!(
+ self.visit_zero_lamport_pubkeys_during_startup(total_accum.zero_lamport_pubkeys)
+ );
+ timings.visit_zero_lamports_us = visit_zero_lamports_us;
+ timings.num_zero_lamport_single_refs = num_zero_lamport_single_refs;
+
+ // subtract data.len() from accounts_data_len for all old accounts that are in the index twice
+ let mut accounts_data_len_dedup_timer =
+ Measure::start("handle accounts data len duplicates");
+ let DuplicatePubkeysVisitedInfo {
+ accounts_data_len_from_duplicates,
+ num_duplicate_accounts,
+ duplicates_lt_hash,
+ } = unique_pubkeys_by_bin
+ .par_iter()
+ .fold(
+ DuplicatePubkeysVisitedInfo::default,
+ |accum, pubkeys_by_bin| {
+ let intermediate = pubkeys_by_bin
+ .par_chunks(4096)
+ .fold(DuplicatePubkeysVisitedInfo::default, |accum, pubkeys| {
+ let (
+ accounts_data_len_from_duplicates,
+ accounts_duplicates_num,
+ duplicates_lt_hash,
+ ) = self.visit_duplicate_pubkeys_during_startup(pubkeys);
+ let intermediate = DuplicatePubkeysVisitedInfo {
+ accounts_data_len_from_duplicates,
+ num_duplicate_accounts: accounts_duplicates_num,
+ duplicates_lt_hash,
+ };
DuplicatePubkeysVisitedInfo::reduce(accum, intermediate)
- },
- )
- .reduce(
- DuplicatePubkeysVisitedInfo::default,
- DuplicatePubkeysVisitedInfo::reduce,
- );
- accounts_data_len_dedup_timer.stop();
- timings.accounts_data_len_dedup_time_us = accounts_data_len_dedup_timer.as_us();
- timings.num_duplicate_accounts = num_duplicate_accounts;
-
- accounts_data_len.fetch_sub(accounts_data_len_from_duplicates, Ordering::Relaxed);
- if let Some(duplicates_lt_hash) = duplicates_lt_hash {
- let old_val = outer_duplicates_lt_hash.replace(duplicates_lt_hash);
- assert!(old_val.is_none());
- }
- info!(
- "accounts data len: {}",
- accounts_data_len.load(Ordering::Relaxed)
- );
+ })
+ .reduce(
+ DuplicatePubkeysVisitedInfo::default,
+ DuplicatePubkeysVisitedInfo::reduce,
+ );
+ DuplicatePubkeysVisitedInfo::reduce(accum, intermediate)
+ },
+ )
+ .reduce(
+ DuplicatePubkeysVisitedInfo::default,
+ DuplicatePubkeysVisitedInfo::reduce,
+ );
+ accounts_data_len_dedup_timer.stop();
+ timings.accounts_data_len_dedup_time_us = accounts_data_len_dedup_timer.as_us();
+ timings.num_duplicate_accounts = num_duplicate_accounts;
- // insert all zero lamport account storage into the dirty stores and add them into the uncleaned roots for clean to pick up
- let all_zero_slots_to_clean = std::mem::take(all_zeros_slots.get_mut().unwrap());
- info!(
- "insert all zero slots to clean at startup {}",
- all_zero_slots_to_clean.len()
- );
- for (slot, storage) in all_zero_slots_to_clean {
- self.dirty_stores.insert(slot, storage);
- }
- }
+ total_accum.lt_hash.mix_out(&duplicates_lt_hash.0);
+ total_accum.accounts_data_len -= accounts_data_len_from_duplicates;
+ info!("accounts data len: {}", total_accum.accounts_data_len);
- if pass == 0 {
- // Need to add these last, otherwise older updates will be cleaned
- for root in &slots {
- self.accounts_index.add_root(*root);
- }
+ // insert all zero lamport account storage into the dirty stores and add them into the uncleaned roots for clean to pick up
+ info!(
+ "insert all zero slots to clean at startup {}",
+ total_accum.all_zeros_slots.len()
+ );
+ for (slot, storage) in total_accum.all_zeros_slots {
+ self.dirty_stores.insert(slot, storage);
+ }
- self.set_storage_count_and_alive_bytes(storage_info, &mut timings);
-
- if self.mark_obsolete_accounts {
- let mut mark_obsolete_accounts_time =
- Measure::start("mark_obsolete_accounts_time");
- // Mark all reclaims at max_slot. This is safe because only the snapshot paths care about
- // this information. Since this account was just restored from the previous snapshot and
- // it is known that it was already obsolete at that time, it must hold true that it will
- // still be obsolete if a newer snapshot is created, since a newer snapshot will always
- // be performed on a slot greater than the current slot
- let slot_marked_obsolete = slots.last().copied().unwrap();
- let obsolete_account_stats = self.mark_obsolete_accounts_at_startup(
- slot_marked_obsolete,
- unique_pubkeys_by_bin,
- );
+ // Need to add these last, otherwise older updates will be cleaned
+ for storage in &storages {
+ self.accounts_index.add_root(storage.slot());
+ }
- mark_obsolete_accounts_time.stop();
- timings.mark_obsolete_accounts_us = mark_obsolete_accounts_time.as_us();
- timings.num_obsolete_accounts_marked =
- obsolete_account_stats.accounts_marked_obsolete;
- timings.num_slots_removed_as_obsolete = obsolete_account_stats.slots_removed;
- }
- }
- total_time.stop();
- timings.total_time_us = total_time.as_us();
- timings.report(self.accounts_index.get_startup_stats());
+ self.set_storage_count_and_alive_bytes(storage_info, &mut timings);
+
+ if self.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled {
+ let mut mark_obsolete_accounts_time = Measure::start("mark_obsolete_accounts_time");
+ // Mark all reclaims at max_slot. This is safe because only the snapshot paths care about
+ // this information. Since this account was just restored from the previous snapshot and
+ // it is known that it was already obsolete at that time, it must hold true that it will
+ // still be obsolete if a newer snapshot is created, since a newer snapshot will always
+ // be performed on a slot greater than the current slot
+ let slot_marked_obsolete = storages.last().unwrap().slot();
+ let obsolete_account_stats =
+ self.mark_obsolete_accounts_at_startup(slot_marked_obsolete, unique_pubkeys_by_bin);
+
+ mark_obsolete_accounts_time.stop();
+ timings.mark_obsolete_accounts_us = mark_obsolete_accounts_time.as_us();
+ timings.num_obsolete_accounts_marked = obsolete_account_stats.accounts_marked_obsolete;
+ timings.num_slots_removed_as_obsolete = obsolete_account_stats.slots_removed;
}
+ total_time.stop();
+ timings.total_time_us = total_time.as_us();
+ timings.report(self.accounts_index.get_startup_stats());
self.accounts_index.log_secondary_indexes();
- // The duplicates lt hash must be Some if populate_duplicates_lt_hash is true.
- // But, if there were no duplicates or obsolete accounts marking removed all
- // duplicates, then we'd never set outer_duplicates_lt_hash to Some! So do one
- // last check here to ensure outer_duplicates_lt_hash is Some if we're supposed
- // to calculate the duplicates lt hash.
- if outer_duplicates_lt_hash.is_none() {
- outer_duplicates_lt_hash = Some(Box::new(DuplicatesLtHash::default()));
- }
+ // Now that the index is generated, get the total capacity of the in-mem maps
+ // across all the bins and set the initial value for the stat.
+ // We do this all at once, at the end, since getting the capacity requires iterating all
+ // the bins and grabbing a read lock, which we try to avoid whenever possible.
+ let index_capacity = self
+ .accounts_index
+ .account_maps
+ .iter()
+ .map(|bin| bin.capacity_for_startup())
+ .sum();
+ self.accounts_index
+ .bucket_map_holder_stats()
+ .capacity_in_mem
+ .store(index_capacity, Ordering::Relaxed);
IndexGenerationInfo {
- accounts_data_len: accounts_data_len.load(Ordering::Relaxed),
- duplicates_lt_hash: outer_duplicates_lt_hash,
+ accounts_data_len: total_accum.accounts_data_len,
+ calculated_accounts_lt_hash: AccountsLtHash(total_accum.lt_hash),
}
}
@@ -7127,30 +6920,21 @@ impl AccountsDb {
let stats: ObsoleteAccountsStats = pubkeys_with_duplicates_by_bin
.par_iter()
.map(|pubkeys_by_bin| {
- let reclaims = self.accounts_index.clean_and_unref_rooted_entries_by_bin(
- pubkeys_by_bin,
- |slot, account_info| {
- // Since the unref makes every account a single ref account, all
- // zero lamport accounts should be tracked as zero_lamport_single_ref
- if account_info.is_zero_lamport() {
- self.zero_lamport_single_ref_found(slot, account_info.offset());
- }
- },
- );
+ let reclaims = self
+ .accounts_index
+ .clean_and_unref_rooted_entries_by_bin(pubkeys_by_bin);
let stats = PurgeStats::default();
- // Convert from a vector to a hashset for use in reclaims
- let pubkeys_removed_from_accounts_index: PubkeysRemovedFromAccountsIndex =
- pubkeys_by_bin.iter().cloned().collect();
-
// Mark all the entries as obsolete, and remove any empty storages
- self.handle_reclaims(
- (!reclaims.is_empty()).then(|| reclaims.iter()),
- None,
- &pubkeys_removed_from_accounts_index,
- HandleReclaims::ProcessDeadSlots(&stats),
- MarkAccountsObsolete::Yes(slot_marked_obsolete),
- );
+ if !reclaims.is_empty() {
+ self.handle_reclaims(
+ reclaims.iter(),
+ None,
+ &HashSet::new(),
+ HandleReclaims::ProcessDeadSlots(&stats),
+ MarkAccountsObsolete::Yes(slot_marked_obsolete),
+ );
+ }
ObsoleteAccountsStats {
accounts_marked_obsolete: reclaims.len() as u64,
slots_removed: stats.total_removed_storage_entries.load(Ordering::Relaxed)
@@ -7169,7 +6953,7 @@ impl AccountsDb {
return;
}
// This number is chosen to keep the initial ram usage sufficiently small
- // The process of generating the index is goverened entirely by how fast the disk index can be populated.
+ // The process of generating the index is governed entirely by how fast the disk index can be populated.
// 10M accounts is sufficiently small that it will never have memory usage. It seems sufficiently large that it will provide sufficient performance.
// Performance is measured by total time to generate the index.
// Just estimating - 150M accounts can easily be held in memory in the accounts index on a 256G machine. 2-300M are also likely 'fine' during startup.
@@ -7190,8 +6974,19 @@ impl AccountsDb {
/// Visit zero lamport pubkeys and populate zero_lamport_single_ref info on
/// storage.
/// Returns the number of zero lamport single ref accounts found.
- fn visit_zero_lamport_pubkeys_during_startup(&self, pubkeys: &HashSet) -> u64 {
- let mut count = 0;
+ fn visit_zero_lamport_pubkeys_during_startup(&self, mut pubkeys: Vec) -> u64 {
+ let mut slot_offsets = HashMap::<_, Vec<_>>::default();
+ // sort the pubkeys first so that in scan, the pubkeys are visited in
+ // index bucket in order. This helps to reduce the page faults and speed
+ // up the scan compared to visiting the pubkeys in random order.
+ let orig_len = pubkeys.len();
+ pubkeys.sort_unstable();
+ pubkeys.dedup();
+ let uniq_len = pubkeys.len();
+ info!(
+ "visit_zero_lamport_pubkeys_during_startup: {orig_len} pubkeys, {uniq_len} after dedup",
+ );
+
self.accounts_index.scan(
pubkeys.iter(),
|_pubkey, slots_refs, _entry| {
@@ -7201,8 +6996,10 @@ impl AccountsDb {
let (slot_alive, account_info) = slot_list.first().unwrap();
assert!(!account_info.is_cached());
if account_info.is_zero_lamport() {
- count += 1;
- self.zero_lamport_single_ref_found(*slot_alive, account_info.offset());
+ slot_offsets
+ .entry(*slot_alive)
+ .or_default()
+ .push(account_info.offset());
}
}
AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
@@ -7211,6 +7008,46 @@ impl AccountsDb {
false,
ScanFilter::All,
);
+
+ let mut count = 0;
+ let mut dead_stores = 0;
+ let mut shrink_stores = 0;
+ let mut non_shrink_stores = 0;
+ for (slot, offsets) in slot_offsets {
+ if let Some(store) = self.storage.get_slot_storage_entry(slot) {
+ count += store.batch_insert_zero_lamport_single_ref_account_offsets(&offsets);
+ if store.num_zero_lamport_single_ref_accounts() == store.count() {
+ // all accounts in this storage can be dead
+ self.dirty_stores.entry(slot).or_insert(store);
+ dead_stores += 1;
+ } else if Self::is_shrinking_productive(&store)
+ && self.is_candidate_for_shrink(&store)
+ {
+ // this store might be eligible for shrinking now
+ if self.shrink_candidate_slots.lock().unwrap().insert(slot) {
+ shrink_stores += 1;
+ }
+ } else {
+ non_shrink_stores += 1;
+ }
+ }
+ }
+ self.shrink_stats
+ .num_zero_lamport_single_ref_accounts_found
+ .fetch_add(count, Ordering::Relaxed);
+
+ self.shrink_stats
+ .num_dead_slots_added_to_clean
+ .fetch_add(dead_stores, Ordering::Relaxed);
+
+ self.shrink_stats
+ .num_slots_with_zero_lamport_accounts_added_to_shrink
+ .fetch_add(shrink_stores, Ordering::Relaxed);
+
+ self.shrink_stats
+ .marking_zero_dead_accounts_in_non_shrinkable_store
+ .fetch_add(non_shrink_stores, Ordering::Relaxed);
+
count
}
@@ -7228,15 +7065,10 @@ impl AccountsDb {
fn visit_duplicate_pubkeys_during_startup(
&self,
pubkeys: &[Pubkey],
- timings: &GenerateIndexTimings,
- ) -> (u64, u64, Option>) {
+ ) -> (u64, u64, Box) {
let mut accounts_data_len_from_duplicates = 0;
let mut num_duplicate_accounts = 0_u64;
- // With obsolete accounts, the duplicates_lt_hash should NOT be created.
- // And skip calculating the lt_hash from accounts too.
- let mut duplicates_lt_hash =
- (!self.mark_obsolete_accounts).then(|| Box::new(DuplicatesLtHash::default()));
- let mut lt_hash_time = Duration::default();
+ let mut duplicates_lt_hash = Box::new(DuplicatesLtHash::default());
self.accounts_index.scan(
pubkeys.iter(),
|pubkey, slots_refs, _entry| {
@@ -7265,14 +7097,9 @@ impl AccountsDb {
accounts_data_len_from_duplicates += data_len;
}
num_duplicate_accounts += 1;
- if let Some(duplicates_lt_hash) = duplicates_lt_hash.as_mut() {
- let (_, duration) = meas_dur!({
- let account_lt_hash =
- Self::lt_hash_account(&loaded_account, pubkey);
- duplicates_lt_hash.0.mix_in(&account_lt_hash.0);
- });
- lt_hash_time += duration;
- }
+ let account_lt_hash =
+ Self::lt_hash_account(&loaded_account, pubkey);
+ duplicates_lt_hash.0.mix_in(&account_lt_hash.0);
});
});
}
@@ -7283,9 +7110,6 @@ impl AccountsDb {
false,
ScanFilter::All,
);
- timings
- .par_duplicates_lt_hash_us
- .fetch_add(lt_hash_time.as_micros() as u64, Ordering::Relaxed);
(
accounts_data_len_from_duplicates as u64,
num_duplicate_accounts,
@@ -7312,16 +7136,15 @@ impl AccountsDb {
store.count(),
);
{
- let mut count_and_status = store.count_and_status.lock_write();
- assert_eq!(count_and_status.0, 0);
- count_and_status.0 = entry.count;
+ let prev_count = store.count.swap(entry.count, Ordering::Release);
+ assert_eq!(prev_count, 0);
}
store
.alive_bytes
.store(entry.stored_size, Ordering::Release);
} else {
trace!("id: {id} clearing count");
- store.count_and_status.lock_write().0 = 0;
+ store.count.store(0, Ordering::Release);
}
}
storage_size_storages_time.stop();
@@ -7361,10 +7184,10 @@ impl AccountsDb {
for slot in &slots {
let entry = self.storage.get_slot_storage_entry(*slot).unwrap();
info!(
- " slot: {} id: {} count_and_status: {:?} len: {} capacity: {}",
+ " slot: {} id: {} count: {} len: {} capacity: {}",
slot,
entry.id(),
- entry.count_and_status.read(),
+ entry.count(),
entry.accounts.len(),
entry.accounts.capacity(),
);
@@ -7375,15 +7198,13 @@ impl AccountsDb {
#[derive(Debug, Copy, Clone)]
enum HandleReclaims<'a> {
ProcessDeadSlots(&'a PurgeStats),
- DoNotProcessDeadSlots,
}
/// Specify whether obsolete accounts should be marked or not during reclaims
/// They should only be marked if they are also getting unreffed in the index
-/// Temporariliy allow dead code until the feature is implemented
-#[derive(Debug, Copy, Clone)]
+/// Temporarily allow dead code until the feature is implemented
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum MarkAccountsObsolete {
- #[allow(dead_code)]
Yes(Slot),
No,
}
@@ -7412,12 +7233,63 @@ impl AccountStorageEntry {
// These functions/fields are only usable from a dev context (i.e. tests and benches)
#[cfg(feature = "dev-context-only-utils")]
impl AccountsDb {
+ pub fn default_for_tests() -> Self {
+ Self::new_single_for_tests()
+ }
+
+ pub fn new_single_for_tests() -> Self {
+ AccountsDb::new_for_tests(Vec::new())
+ }
+
+ pub fn new_single_for_tests_with_provider_and_config(
+ file_provider: AccountsFileProvider,
+ accounts_db_config: AccountsDbConfig,
+ ) -> Self {
+ AccountsDb::new_for_tests_with_provider_and_config(
+ Vec::new(),
+ file_provider,
+ accounts_db_config,
+ )
+ }
+
+ pub fn new_for_tests(paths: Vec) -> Self {
+ Self::new_for_tests_with_provider_and_config(
+ paths,
+ AccountsFileProvider::default(),
+ ACCOUNTS_DB_CONFIG_FOR_TESTING,
+ )
+ }
+
+ fn new_for_tests_with_provider_and_config(
+ paths: Vec,
+ accounts_file_provider: AccountsFileProvider,
+ accounts_db_config: AccountsDbConfig,
+ ) -> Self {
+ let mut db = AccountsDb::new_with_config(paths, accounts_db_config, None, Arc::default());
+ db.accounts_file_provider = accounts_file_provider;
+ db
+ }
+
/// Return the number of slots marked with uncleaned pubkeys.
- /// This is useful for testing clean aglorithms.
+ /// This is useful for testing clean algorithms.
pub fn get_len_of_slots_with_uncleaned_pubkeys(&self) -> usize {
self.uncleaned_pubkeys.len()
}
+ #[cfg(test)]
+ pub fn storage_access(&self) -> StorageAccess {
+ self.storage_access
+ }
+
+ /// Call clean_accounts() with the common parameters that tests/benches use.
+ pub fn clean_accounts_for_tests(&self) {
+ self.clean_accounts(None, false, &EpochSchedule::default())
+ }
+
+ pub fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) {
+ self.flush_slot_cache(slot);
+ }
+
/// useful to adapt tests written prior to introduction of the write cache
/// to use the write cache
pub fn add_root_and_flush_write_cache(&self, slot: Slot) {
@@ -7469,10 +7341,28 @@ impl AccountsDb {
}
}
+ /// Iterate over all accounts from all `storages` and call `callback` with each account.
+ ///
+ /// `callback` parameters:
+ /// * Offset: the offset within the file of this account
+ /// * StoredAccountInfo: the account itself, with account data
+ pub fn scan_accounts_from_storages(
+ storages: &[Arc],
+ mut callback: impl for<'local> FnMut(Offset, StoredAccountInfo<'local>),
+ ) {
+ let mut reader = append_vec::new_scan_accounts_reader();
+ for storage in storages {
+ storage
+ .accounts
+ .scan_accounts(&mut reader, &mut callback)
+ .expect("must scan accounts storage");
+ }
+ }
+
/// callers used to call store_uncached. But, this is not allowed anymore.
- pub fn store_for_tests(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) {
- self.store(
- (slot, accounts),
+ pub fn store_for_tests<'a>(&self, accounts: impl StorableAccounts<'a>) {
+ self.store_accounts_unfrozen(
+ accounts,
None,
UpdateIndexThreadSelection::PoolWithThreshold,
);
@@ -7486,13 +7376,12 @@ impl AccountsDb {
0,
AccountSharedData::default().owner(),
);
- self.store_for_tests(slot, &[(&pubkeys[idx], &account)]);
+ self.store_for_tests((slot, [(&pubkeys[idx], &account)].as_slice()));
}
}
pub fn check_storage(&self, slot: Slot, alive_count: usize, total_count: usize) {
let store = self.storage.get_slot_storage_entry(slot).unwrap();
- assert_eq!(store.status(), AccountStorageStatus::Available);
assert_eq!(store.count(), alive_count);
assert_eq!(store.accounts_count(), total_count);
}
@@ -7512,7 +7401,7 @@ impl AccountsDb {
AccountSharedData::new((t + 1) as u64, space, AccountSharedData::default().owner());
pubkeys.push(pubkey);
assert!(self.load_without_fixed_root(&ancestors, &pubkey).is_none());
- self.store_for_tests(slot, &[(&pubkey, &account)]);
+ self.store_for_tests((slot, [(&pubkey, &account)].as_slice()));
}
for t in 0..num_vote {
let pubkey = solana_pubkey::new_rand();
@@ -7521,7 +7410,7 @@ impl AccountsDb {
pubkeys.push(pubkey);
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(self.load_without_fixed_root(&ancestors, &pubkey).is_none());
- self.store_for_tests(slot, &[(&pubkey, &account)]);
+ self.store_for_tests((slot, [(&pubkey, &account)].as_slice()));
}
}
@@ -7538,8 +7427,20 @@ impl AccountsDb {
sizes
}
- pub fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
- self.accounts_index.ref_count_from_storage(pubkey)
+ // With obsolete accounts marked, obsolete references are marked in the storage
+ // and no longer need to be referenced. This leads to a static reference count
+ // of 1. As reference checking is common in tests, this test wrapper abstracts the behavior
+ pub fn assert_ref_count(&self, pubkey: &Pubkey, expected_ref_count: RefCount) {
+ let expected_ref_count = match self.mark_obsolete_accounts {
+ MarkObsoleteAccounts::Disabled => expected_ref_count,
+ // When obsolete accounts are marked, the ref count is always 1 or 0
+ MarkObsoleteAccounts::Enabled => expected_ref_count.min(1),
+ };
+
+ assert_eq!(
+ expected_ref_count,
+ self.accounts_index.ref_count_from_storage(pubkey)
+ );
}
pub fn alive_account_count_in_slot(&self, slot: Slot) -> usize {
@@ -7583,39 +7484,3 @@ impl AccountsDb {
&self.uncleaned_pubkeys
}
}
-
-/// A set of utility functions used for testing and benchmarking
-#[cfg(feature = "dev-context-only-utils")]
-pub mod test_utils {
- use {super::*, crate::accounts::Accounts};
-
- pub fn create_test_accounts(
- accounts: &Accounts,
- pubkeys: &mut Vec,
- num: usize,
- slot: Slot,
- ) {
- let data_size = 0;
-
- for t in 0..num {
- let pubkey = solana_pubkey::new_rand();
- let account = AccountSharedData::new(
- (t + 1) as u64,
- data_size,
- AccountSharedData::default().owner(),
- );
- accounts.store_cached((slot, &[(&pubkey, &account)][..]), None);
- pubkeys.push(pubkey);
- }
- }
-
- // Only used by bench, not safe to call otherwise accounts can conflict with the
- // accounts cache!
- pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) {
- for pubkey in pubkeys {
- let amount = thread_rng().gen_range(0..10);
- let account = AccountSharedData::new(amount, 0, AccountSharedData::default().owner());
- accounts.store_cached((slot, &[(pubkey, &account)][..]), None);
- }
- }
-}
diff --git a/accounts-db/src/accounts_db/geyser_plugin_utils.rs b/accounts-db/src/accounts_db/geyser_plugin_utils.rs
index 61f076fb7359bb..11477b5d9939d7 100644
--- a/accounts-db/src/accounts_db/geyser_plugin_utils.rs
+++ b/accounts-db/src/accounts_db/geyser_plugin_utils.rs
@@ -1,82 +1,9 @@
use {
- crate::{
- accounts_db::{AccountStorageEntry, AccountsDb},
- accounts_update_notifier_interface::AccountsUpdateNotifierInterface,
- },
- solana_account::AccountSharedData,
- solana_clock::Slot,
- solana_measure::meas_dur,
- solana_metrics::*,
- solana_pubkey::Pubkey,
- solana_transaction::sanitized::SanitizedTransaction,
- std::{
- cmp::Reverse,
- ops::AddAssign,
- time::{Duration, Instant},
- },
+ crate::accounts_db::AccountsDb, solana_account::AccountSharedData, solana_clock::Slot,
+ solana_pubkey::Pubkey, solana_transaction::sanitized::SanitizedTransaction,
};
-#[derive(Default)]
-pub struct GeyserPluginNotifyAtSnapshotRestoreStats {
- pub notified_accounts: usize,
- pub elapsed_notifying: Duration,
- pub total_pure_notify: Duration,
-}
-
-impl GeyserPluginNotifyAtSnapshotRestoreStats {
- pub fn report(&self) {
- datapoint_info!(
- "accountsdb_plugin_notify_account_restore_from_snapshot_summary",
- ("notified_accounts", self.notified_accounts, i64),
- (
- "elapsed_notifying_us",
- self.elapsed_notifying.as_micros(),
- i64
- ),
- (
- "total_pure_notify_us",
- self.total_pure_notify.as_micros(),
- i64
- ),
- );
- }
-}
-
-impl AddAssign for GeyserPluginNotifyAtSnapshotRestoreStats {
- fn add_assign(&mut self, other: Self) {
- self.notified_accounts += other.notified_accounts;
- self.elapsed_notifying += other.elapsed_notifying;
- self.total_pure_notify += other.total_pure_notify;
- }
-}
-
impl AccountsDb {
- /// Notify the plugins of account data when AccountsDb is restored from a snapshot.
- ///
- /// Since accounts may have multiple versions in different slots, plugins must handle
- /// deduplication by inspected the slot and write version of each account notification.
- pub fn notify_account_restore_from_snapshot(&self) {
- let Some(accounts_update_notifier) = &self.accounts_update_notifier else {
- return;
- };
-
- let mut notify_stats = GeyserPluginNotifyAtSnapshotRestoreStats::default();
- if accounts_update_notifier.snapshot_notifications_enabled() {
- let mut slots = self.storage.all_slots();
- slots.sort_unstable_by_key(|&slot| Reverse(slot));
- slots
- .into_iter()
- .filter_map(|slot| self.storage.get_slot_storage_entry(slot))
- .map(|storage| {
- Self::notify_accounts_in_storage(accounts_update_notifier.as_ref(), &storage)
- })
- .for_each(|stats| notify_stats += stats);
- }
-
- accounts_update_notifier.notify_end_of_restore_from_snapshot();
- notify_stats.report();
- }
-
pub fn notify_account_at_accounts_update(
&self,
slot: Slot,
@@ -95,45 +22,17 @@ impl AccountsDb {
);
}
}
-
- fn notify_accounts_in_storage(
- notifier: &dyn AccountsUpdateNotifierInterface,
- storage: &AccountStorageEntry,
- ) -> GeyserPluginNotifyAtSnapshotRestoreStats {
- let mut pure_notify_time = Duration::ZERO;
- let mut i = 0;
- let notifying_start = Instant::now();
- storage
- .accounts
- .scan_accounts_for_geyser(|account| {
- i += 1;
- // later entries in the same slot are more recent and override earlier accounts for the same pubkey
- // We can pass an incrementing number here for write_version in the future, if the storage does not have a write_version.
- // As long as all accounts for this slot are in 1 append vec that can be iterated oldest to newest.
- let (_, notify_dur) = meas_dur!(notifier.notify_account_restore_from_snapshot(
- storage.slot(),
- i as u64,
- &account
- ));
- pure_notify_time += notify_dur;
- })
- .expect("must scan accounts storage");
- let notifying_time = notifying_start.elapsed();
-
- GeyserPluginNotifyAtSnapshotRestoreStats {
- notified_accounts: i,
- elapsed_notifying: notifying_time,
- total_pure_notify: pure_notify_time,
- }
- }
}
#[cfg(test)]
pub mod tests {
use {
super::*,
- crate::accounts_update_notifier_interface::{
- AccountForGeyser, AccountsUpdateNotifier, AccountsUpdateNotifierInterface,
+ crate::{
+ accounts_db::{AccountsDbConfig, MarkObsoleteAccounts, ACCOUNTS_DB_CONFIG_FOR_TESTING},
+ accounts_update_notifier_interface::{
+ AccountForGeyser, AccountsUpdateNotifier, AccountsUpdateNotifierInterface,
+ },
},
dashmap::DashMap,
solana_account::ReadableAccount as _,
@@ -141,6 +40,7 @@ pub mod tests {
atomic::{AtomicBool, Ordering},
Arc,
},
+ test_case::test_case,
};
impl AccountsDb {
@@ -195,9 +95,18 @@ pub mod tests {
}
}
- #[test]
- fn test_notify_account_restore_from_snapshot() {
- let mut accounts = AccountsDb::new_single_for_tests();
+ #[test_case(MarkObsoleteAccounts::Enabled)]
+ #[test_case(MarkObsoleteAccounts::Disabled)]
+ fn test_notify_account_restore_from_snapshot(mark_obsolete_accounts: MarkObsoleteAccounts) {
+ let mut accounts_db = AccountsDb::new_with_config(
+ Vec::new(),
+ AccountsDbConfig {
+ mark_obsolete_accounts,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ None,
+ Arc::default(),
+ );
let key1 = Pubkey::new_unique();
let key2 = Pubkey::new_unique();
let account = AccountSharedData::new(1, 0, &Pubkey::default());
@@ -206,31 +115,48 @@ pub mod tests {
// Need to add root and flush write cache for each slot to ensure accounts are written
// to correct slots. Cache flush can skip writes if accounts have already been written to
// a newer slot
- accounts.store_for_tests(0, &[(&key1, &account)]);
- accounts.add_root_and_flush_write_cache(0);
- accounts.store_for_tests(1, &[(&key1, &account)]);
- accounts.add_root_and_flush_write_cache(1);
+ let slot0 = 0;
+ let storage0 = accounts_db.create_and_insert_store(slot0, /*size*/ 4_096, "");
+ storage0
+ .accounts
+ .write_accounts(&(slot0, [(&key1, &account)].as_slice()), /*skip*/ 0);
+
+ let slot1 = 1;
+ let storage1 = accounts_db.create_and_insert_store(slot1, /*size*/ 4_096, "");
+ storage1
+ .accounts
+ .write_accounts(&(slot1, [(&key1, &account)].as_slice()), /*skip*/ 0);
// Account with key2 is updated in a single slot, should get notified once
- accounts.store_for_tests(2, &[(&key2, &account)]);
- accounts.add_root_and_flush_write_cache(2);
+ let slot2 = 2;
+ let storage2 = accounts_db.create_and_insert_store(slot2, /*size*/ 4_096, "");
+ storage2
+ .accounts
+ .write_accounts(&(slot2, [(&key2, &account)].as_slice()), /*skip*/ 0);
// Do the notification
let notifier = GeyserTestPlugin::default();
let notifier = Arc::new(notifier);
- accounts.set_geyser_plugin_notifier(Some(notifier.clone()));
- accounts.notify_account_restore_from_snapshot();
+ accounts_db.set_geyser_plugin_notifier(Some(notifier.clone()));
+ accounts_db.generate_index(None, false);
// Ensure key1 was notified twice in different slots
{
let notified_key1 = notifier.accounts_notified.get(&key1).unwrap();
assert_eq!(notified_key1.len(), 2);
- let (slot, write_version, _account) = ¬ified_key1[0];
- assert_eq!(*slot, 1);
- assert_eq!(*write_version, 1);
- let (slot, write_version, _account) = ¬ified_key1[1];
- assert_eq!(*slot, 0);
- assert_eq!(*write_version, 1);
+
+ // Since index generation goes through storages in parallel, there's not a
+ // deterministic order for which slots will notify first.
+ // So, we sort the accounts_notified values to ensure we can assert correctly.
+ let mut notified_key1_values = notified_key1.value().clone();
+ notified_key1_values.sort_unstable_by_key(|k| k.0);
+
+ let (slot, write_version, _account) = ¬ified_key1_values[0];
+ assert_eq!(*slot, slot0);
+ assert_eq!(*write_version, 0);
+ let (slot, write_version, _account) = ¬ified_key1_values[1];
+ assert_eq!(*slot, slot1);
+ assert_eq!(*write_version, 0);
}
// Ensure key2 was notified once
@@ -238,8 +164,8 @@ pub mod tests {
let notified_key2 = notifier.accounts_notified.get(&key2).unwrap();
assert_eq!(notified_key2.len(), 1);
let (slot, write_version, _account) = ¬ified_key2[0];
- assert_eq!(*slot, 2);
- assert_eq!(*write_version, 1);
+ assert_eq!(*slot, slot2);
+ assert_eq!(*write_version, 0);
}
// Ensure we were notified that startup is done
@@ -263,24 +189,24 @@ pub mod tests {
let account1 =
AccountSharedData::new(account1_lamports1, 1, AccountSharedData::default().owner());
let slot0 = 0;
- accounts.store_cached((slot0, &[(&key1, &account1)][..]));
+ accounts.store_for_tests((slot0, &[(&key1, &account1)][..]));
let key2 = solana_pubkey::new_rand();
let account2_lamports: u64 = 200;
let account2 =
AccountSharedData::new(account2_lamports, 1, AccountSharedData::default().owner());
- accounts.store_cached((slot0, &[(&key2, &account2)][..]));
+ accounts.store_for_tests((slot0, &[(&key2, &account2)][..]));
let account1_lamports2 = 2;
let slot1 = 1;
let account1 = AccountSharedData::new(account1_lamports2, 1, account1.owner());
- accounts.store_cached((slot1, &[(&key1, &account1)][..]));
+ accounts.store_for_tests((slot1, &[(&key1, &account1)][..]));
let key3 = solana_pubkey::new_rand();
let account3_lamports: u64 = 300;
let account3 =
AccountSharedData::new(account3_lamports, 1, AccountSharedData::default().owner());
- accounts.store_cached((slot1, &[(&key3, &account3)][..]));
+ accounts.store_for_tests((slot1, &[(&key3, &account3)][..]));
assert_eq!(notifier.accounts_notified.get(&key1).unwrap().len(), 2);
assert_eq!(
diff --git a/accounts-db/src/accounts_db/stats.rs b/accounts-db/src/accounts_db/stats.rs
index 6a5f5409696e59..a59c1497a309ea 100644
--- a/accounts-db/src/accounts_db/stats.rs
+++ b/accounts-db/src/accounts_db/stats.rs
@@ -10,12 +10,7 @@ use {
#[derive(Debug, Default)]
pub struct AccountsStats {
- pub delta_hash_scan_time_total_us: AtomicU64,
- pub delta_hash_accumulate_time_total_us: AtomicU64,
- pub delta_hash_num: AtomicU64,
-
pub last_store_report: AtomicInterval,
- pub store_hash_accounts: AtomicU64,
pub store_accounts: AtomicU64,
pub store_update_index: AtomicU64,
pub store_handle_reclaims: AtomicU64,
@@ -23,13 +18,16 @@ pub struct AccountsStats {
pub stakes_cache_check_and_store_us: AtomicU64,
pub store_num_accounts: AtomicU64,
pub store_total_data: AtomicU64,
+ pub num_reclaims: AtomicU64,
pub create_store_count: AtomicU64,
- pub store_get_slot_store: AtomicU64,
- pub store_find_existing: AtomicU64,
pub dropped_stores: AtomicU64,
pub handle_dead_keys_us: AtomicU64,
pub purge_exact_us: AtomicU64,
pub purge_exact_count: AtomicU64,
+ pub num_obsolete_slots_removed: AtomicUsize,
+ pub num_obsolete_bytes_removed: AtomicU64,
+ pub add_zero_lamport_accounts_us: AtomicU64,
+ pub num_zero_lamport_accounts_added: AtomicU64,
}
#[derive(Debug, Default)]
diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs
index f2533e62c29926..abb79aa3b9cf2c 100644
--- a/accounts-db/src/accounts_db/tests.rs
+++ b/accounts-db/src/accounts_db/tests.rs
@@ -2,10 +2,8 @@
use {
super::*,
crate::{
- account_info::StoredSize,
accounts_file::AccountsFileProvider,
accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude},
- ancient_append_vecs,
append_vec::{
aligned_stored_size, test_utils::TempFile, AccountMeta, AppendVec, StoredAccountMeta,
StoredMeta,
@@ -27,7 +25,7 @@ use {
sync::{atomic::AtomicBool, RwLock},
thread::{self, Builder, JoinHandle},
},
- test_case::test_case,
+ test_case::{test_case, test_matrix},
};
fn linear_ancestors(end_slot: u64) -> Ancestors {
@@ -118,28 +116,36 @@ impl AccountStorageEntry {
/// For test that should panic, use the following syntax.
/// define_accounts_db_test!(TEST_NAME, panic = "PANIC_MSG", |accounts_db| { TEST_BODY });
macro_rules! define_accounts_db_test {
- (@testfn $name:ident, $accounts_file_provider: ident, |$accounts_db:ident| $inner: tt) => {
- fn run_test($accounts_db: AccountsDb) {
- $inner
- }
- let accounts_db =
- AccountsDb::new_single_for_tests_with_provider($accounts_file_provider);
- run_test(accounts_db);
-
+ (@testfn $name:ident, $accounts_file_provider: ident, $mark_obsolete_accounts: ident, |$accounts_db:ident| $inner: tt) => {
+ fn run_test($accounts_db: AccountsDb) {
+ $inner
+ }
+ let accounts_db = AccountsDb::new_single_for_tests_with_provider_and_config(
+ $accounts_file_provider,
+ AccountsDbConfig {
+ mark_obsolete_accounts: $mark_obsolete_accounts,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ );
+ run_test(accounts_db);
};
($name:ident, |$accounts_db:ident| $inner: tt) => {
- #[test_case(AccountsFileProvider::AppendVec; "append_vec")]
- #[test_case(AccountsFileProvider::HotStorage; "hot_storage")]
- fn $name(accounts_file_provider: AccountsFileProvider) {
- define_accounts_db_test!(@testfn $name, accounts_file_provider, |$accounts_db| $inner);
+ #[test_matrix(
+ [AccountsFileProvider::AppendVec, AccountsFileProvider::HotStorage],
+ [MarkObsoleteAccounts::Enabled, MarkObsoleteAccounts::Disabled]
+ )]
+ fn $name(accounts_file_provider: AccountsFileProvider, mark_obsolete_accounts: MarkObsoleteAccounts) {
+ define_accounts_db_test!(@testfn $name, accounts_file_provider, mark_obsolete_accounts, |$accounts_db| $inner);
}
};
($name:ident, panic = $panic_message:literal, |$accounts_db:ident| $inner: tt) => {
- #[test_case(AccountsFileProvider::AppendVec; "append_vec")]
- #[test_case(AccountsFileProvider::HotStorage; "hot_storage")]
+ #[test_matrix(
+ [AccountsFileProvider::AppendVec, AccountsFileProvider::HotStorage],
+ [MarkObsoleteAccounts::Enabled, MarkObsoleteAccounts::Disabled]
+ )]
#[should_panic(expected = $panic_message)]
- fn $name(accounts_file_provider: AccountsFileProvider) {
- define_accounts_db_test!(@testfn $name, accounts_file_provider, |$accounts_db| $inner);
+ fn $name(accounts_file_provider: AccountsFileProvider, mark_obsolete_accounts: MarkObsoleteAccounts) {
+ define_accounts_db_test!(@testfn $name, accounts_file_provider, mark_obsolete_accounts, |$accounts_db| $inner);
}
};
}
@@ -175,7 +181,16 @@ fn run_generate_index_duplicates_within_slot_test(db: AccountsDb, reverse: bool)
append_vec.accounts.write_accounts(&storable_accounts, 0);
assert!(!db.accounts_index.contains(&pubkey));
- db.generate_index(None, false);
+ let storage_info = StorageSizeAndCountMap::default();
+ let storage = db.get_storage_for_slot(slot0).unwrap();
+ let mut reader = append_vec::new_scan_accounts_reader();
+ db.generate_index_for_slot(
+ &mut reader,
+ &storage,
+ storage.slot(),
+ storage.id(),
+ &storage_info,
+ );
}
define_accounts_db_test!(
@@ -358,7 +373,7 @@ pub(crate) fn append_single_account_with_default_hash(
account,
&AccountSecondaryIndexes::default(),
account_info,
- &mut Vec::default(),
+ &mut ReclaimsSlotList::new(),
UpsertReclaim::IgnoreReclaims,
);
}
@@ -386,6 +401,7 @@ fn sample_storage_with_entries_id_fill_percentage(
mark_alive: bool,
account_data_size: Option,
fill_percentage: u64,
+ storage_access: StorageAccess,
) -> Arc {
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let file_size = account_data_size.unwrap_or(123) * 100 / fill_percentage;
@@ -396,11 +412,13 @@ fn sample_storage_with_entries_id_fill_percentage(
id,
size_aligned as u64,
AccountsFileProvider::AppendVec,
+ storage_access,
);
let av = AccountsFile::AppendVec(AppendVec::new(
&tf.path,
true,
(1024 * 1024).max(size_aligned),
+ storage_access,
));
data.accounts = av;
@@ -416,6 +434,7 @@ fn sample_storage_with_entries_id(
id: AccountsFileId,
mark_alive: bool,
account_data_size: Option,
+ storage_access: StorageAccess,
) -> Arc {
sample_storage_with_entries_id_fill_percentage(
tf,
@@ -425,6 +444,7 @@ fn sample_storage_with_entries_id(
mark_alive,
account_data_size,
100,
+ storage_access,
)
}
@@ -432,7 +452,7 @@ define_accounts_db_test!(test_accountsdb_add_root, |db| {
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
- db.store_for_tests(0, &[(&key, &account0)]);
+ db.store_for_tests((0, [(&key, &account0)].as_slice()));
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
@@ -445,10 +465,10 @@ define_accounts_db_test!(test_accountsdb_latest_ancestor, |db| {
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
- db.store_for_tests(0, &[(&key, &account0)]);
+ db.store_for_tests((0, [(&key, &account0)].as_slice()));
let account1 = AccountSharedData::new(0, 0, &key);
- db.store_for_tests(1, &[(&key, &account1)]);
+ db.store_for_tests((1, [(&key, &account1)].as_slice()));
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
@@ -463,14 +483,17 @@ define_accounts_db_test!(test_accountsdb_latest_ancestor, |db| {
);
let mut accounts = Vec::new();
- db.unchecked_scan_accounts(
- "",
+ db.scan_accounts(
&ancestors,
- |_, account, _| {
- accounts.push(account.take_account());
+ 0,
+ |scan_result| {
+ if let Some((_, account, _)) = scan_result {
+ accounts.push(account);
+ }
},
&ScanConfig::default(),
- );
+ )
+ .expect("should scan accounts");
assert_eq!(accounts, vec![account1]);
});
@@ -478,10 +501,10 @@ define_accounts_db_test!(test_accountsdb_latest_ancestor_with_root, |db| {
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
- db.store_for_tests(0, &[(&key, &account0)]);
+ db.store_for_tests((0, [(&key, &account0)].as_slice()));
let account1 = AccountSharedData::new(0, 0, &key);
- db.store_for_tests(1, &[(&key, &account1)]);
+ db.store_for_tests((1, [(&key, &account1)].as_slice()));
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
@@ -502,7 +525,7 @@ define_accounts_db_test!(test_accountsdb_root_one_slot, |db| {
let account0 = AccountSharedData::new(1, 0, &key);
// store value 1 in the "root", i.e. db zero
- db.store_for_tests(0, &[(&key, &account0)]);
+ db.store_for_tests((0, [(&key, &account0)].as_slice()));
// now we have:
//
@@ -515,7 +538,7 @@ define_accounts_db_test!(test_accountsdb_root_one_slot, |db| {
// store value 0 in one child
let account1 = AccountSharedData::new(0, 0, &key);
- db.store_for_tests(1, &[(&key, &account1)]);
+ db.store_for_tests((1, [(&key, &account1)].as_slice()));
// masking accounts is done at the Accounts level, at accountsDB we see
// original account (but could also accept "None", which is implemented
@@ -593,14 +616,21 @@ define_accounts_db_test!(test_accountsdb_count_stores, |db| {
let pubkey = solana_pubkey::new_rand();
let account = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey);
- db.store_for_tests(1, &[(&pubkey, &account)]);
- db.store_for_tests(1, &[(&pubkeys[0], &account)]);
+ db.store_for_tests((1, [(&pubkey, &account)].as_slice()));
+ db.store_for_tests((1, [(&pubkeys[0], &account)].as_slice()));
// adding root doesn't change anything
db.add_root_and_flush_write_cache(1);
{
let slot_0_store = &db.storage.get_slot_storage_entry(0).unwrap();
let slot_1_store = &db.storage.get_slot_storage_entry(1).unwrap();
- assert_eq!(slot_0_store.count(), 2);
+
+ // With obsolete accounts enabled, flush_write_cache will clean pubkeys in slot0
+ // when flushing slot1
+ if db.mark_obsolete_accounts == MarkObsoleteAccounts::Enabled {
+ assert_eq!(slot_0_store.count(), 1);
+ } else {
+ assert_eq!(slot_0_store.count(), 2);
+ }
assert_eq!(slot_1_store.count(), 2);
assert_eq!(slot_0_store.accounts_count(), 2);
assert_eq!(slot_1_store.accounts_count(), 2);
@@ -609,7 +639,7 @@ define_accounts_db_test!(test_accountsdb_count_stores, |db| {
// overwrite old rooted account version; only the r_slot_0_stores.count() should be
// decremented
// slot 2 is not a root and should be ignored by clean
- db.store_for_tests(2, &[(&pubkeys[0], &account)]);
+ db.store_for_tests((2, [(&pubkeys[0], &account)].as_slice()));
db.clean_accounts_for_tests();
{
let slot_0_store = &db.storage.get_slot_storage_entry(0).unwrap();
@@ -626,11 +656,11 @@ define_accounts_db_test!(test_accounts_unsquashed, |db0| {
// 1 token in the "root", i.e. db zero
let account0 = AccountSharedData::new(1, 0, &key);
- db0.store_for_tests(0, &[(&key, &account0)]);
+ db0.store_for_tests((0, [(&key, &account0)].as_slice()));
// 0 lamports in the child
let account1 = AccountSharedData::new(0, 0, &key);
- db0.store_for_tests(1, &[(&key, &account1)]);
+ db0.store_for_tests((1, [(&key, &account1)].as_slice()));
// masking accounts is done at the Accounts level, at accountsDB we see
// original account
@@ -646,6 +676,81 @@ define_accounts_db_test!(test_accounts_unsquashed, |db0| {
);
});
+/// Test to verify that reclaiming old storages during flush works correctly.
+/// Creates multiple storages with accounts, flushes them, and then creates a new storage
+/// that invalidates some of the old accounts. The test checks that one of the old storages
+/// is reclaimed, as that storage has been fully invalidated.
+#[test]
+fn test_flush_slots_with_reclaim_old_slots() {
+ let accounts = AccountsDb::new_single_for_tests();
+ let mut pubkeys = vec![];
+
+ // Create and flush 5 slots with 5 accounts each
+ for slot in 0..5 {
+ let mut slot_pubkeys = vec![];
+ for _ in 0..5 {
+ let pubkey = solana_pubkey::new_rand();
+ let account = AccountSharedData::new(slot + 1, 0, &pubkey);
+ accounts.store_for_tests((slot, [(&pubkey, &account)].as_slice()));
+ slot_pubkeys.push(pubkey);
+ }
+ pubkeys.push(slot_pubkeys);
+ accounts.add_root_and_flush_write_cache(slot);
+ }
+
+ // Create another slot which invalidates 5 accounts from the first slot,
+ // 4 accounts from the second slot, etc.
+ let new_slot = 5;
+ for (slot, slot_pubkeys) in pubkeys.iter().enumerate() {
+ for pubkey in slot_pubkeys.iter().take(5 - slot) {
+ let account = AccountSharedData::new(new_slot + 1, 0, pubkey);
+ accounts.store_for_tests((new_slot, [(pubkey, &account)].as_slice()));
+ }
+ }
+
+ // Get the accounts from the write cache slot
+ let accounts_list: Vec<(_, _)> = accounts
+ .accounts_cache
+ .slot_cache(new_slot)
+ .unwrap()
+ .iter()
+ .map(|iter_item| {
+ let pubkey = *iter_item.key();
+ let account = iter_item.value().account.clone();
+ (pubkey, account)
+ })
+ .collect();
+
+ let storage = accounts.create_and_insert_store(new_slot, 4096, "test_flush_slots");
+
+ accounts.accounts_index.add_root(new_slot);
+
+ // Flush this storage directly via _store_accounts_frozen so that UpsertReclaim::ReclaimOldSlots can be passed in
+ accounts._store_accounts_frozen(
+ (new_slot, &accounts_list[..]),
+ &storage,
+ UpsertReclaim::ReclaimOldSlots,
+ UpdateIndexThreadSelection::Inline,
+ );
+
+ // Remove the flushed slot from the cache
+ assert!(accounts.accounts_cache.remove_slot(new_slot).is_some());
+
+ // Verify that the storage for the first slot has been removed
+ assert!(accounts.storage.get_slot_storage_entry(0).is_none());
+ for slot in 1..5 {
+ assert!(accounts.storage.get_slot_storage_entry(slot).is_some());
+
+ // Verify that the obsolete accounts for the remaining slots are correct
+ let storage = accounts.storage.get_slot_storage_entry(slot).unwrap();
+ assert_eq!(
+ storage.get_obsolete_accounts(Some(new_slot)).len() as u64,
+ 5 - slot
+ );
+ }
+ assert!(accounts.storage.get_slot_storage_entry(new_slot).is_some());
+}
+
fn run_test_remove_unrooted_slot(is_cached: bool, db: AccountsDb) {
let unrooted_slot = 9;
let unrooted_bank_id = 9;
@@ -654,10 +759,19 @@ fn run_test_remove_unrooted_slot(is_cached: bool, db: AccountsDb) {
let ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
assert!(!db.accounts_index.contains(&key));
if is_cached {
- db.store_cached((unrooted_slot, &[(&key, &account0)][..]));
+ db.store_for_tests((unrooted_slot, &[(&key, &account0)][..]));
+ assert!(db.accounts_cache.contains(unrooted_slot));
} else {
- db.store_for_tests(unrooted_slot, &[(&key, &account0)]);
+ let file_size = 4096; // value doesn't need to be exact, just big enough to hold account0
+ let storage = db.create_and_insert_store(unrooted_slot, file_size, "");
+ db.store_accounts_frozen(
+ (unrooted_slot, [(&key, &account0)].as_slice()),
+ &storage,
+ UpdateIndexThreadSelection::Inline,
+ );
+ assert!(db.storage.get_slot_storage_entry(unrooted_slot).is_some());
}
+ assert!(!db.accounts_index.is_alive_root(unrooted_slot));
assert!(db.accounts_index.contains(&key));
db.assert_load_account(unrooted_slot, key, 1);
@@ -670,7 +784,7 @@ fn run_test_remove_unrooted_slot(is_cached: bool, db: AccountsDb) {
// Test we can store for the same slot again and get the right information
let account0 = AccountSharedData::new(2, 0, &key);
- db.store_for_tests(unrooted_slot, &[(&key, &account0)]);
+ db.store_for_tests((unrooted_slot, [(&key, &account0)].as_slice()));
db.assert_load_account(unrooted_slot, key, 2);
}
@@ -689,7 +803,7 @@ fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range:
if let Some((mut account, _)) = accounts.load_without_fixed_root(&ancestors, &pubkeys[idx])
{
account.checked_add_lamports(1).unwrap();
- accounts.store_for_tests(slot, &[(&pubkeys[idx], &account)]);
+ accounts.store_for_tests((slot, [(&pubkeys[idx], &account)].as_slice()));
if account.is_zero_lamport() {
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts
@@ -752,7 +866,7 @@ fn test_account_grow_many() {
for i in 0..9 {
let key = solana_pubkey::new_rand();
let account = AccountSharedData::new(i + 1, size as usize / 4, &key);
- accounts.store_for_tests(0, &[(&key, &account)]);
+ accounts.store_for_tests((0, [(&key, &account)].as_slice()));
keys.push(key);
}
let ancestors = vec![(0, 0)].into_iter().collect();
@@ -785,28 +899,25 @@ fn test_account_grow() {
for pass in 0..27 {
let accounts = AccountsDb::new_single_for_tests();
- let status = [AccountStorageStatus::Available, AccountStorageStatus::Full];
let pubkey1 = solana_pubkey::new_rand();
let account1 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1);
- accounts.store_for_tests(0, &[(&pubkey1, &account1)]);
+ accounts.store_for_tests((0, [(&pubkey1, &account1)].as_slice()));
if pass == 0 {
accounts.add_root_and_flush_write_cache(0);
let store = &accounts.storage.get_slot_storage_entry(0).unwrap();
assert_eq!(store.count(), 1);
- assert_eq!(store.status(), AccountStorageStatus::Available);
continue;
}
let pubkey2 = solana_pubkey::new_rand();
let account2 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2);
- accounts.store_for_tests(0, &[(&pubkey2, &account2)]);
+ accounts.store_for_tests((0, [(&pubkey2, &account2)].as_slice()));
if pass == 1 {
accounts.add_root_and_flush_write_cache(0);
assert_eq!(accounts.storage.len(), 1);
let store = &accounts.storage.get_slot_storage_entry(0).unwrap();
assert_eq!(store.count(), 2);
- assert_eq!(store.status(), AccountStorageStatus::Available);
continue;
}
let ancestors = vec![(0, 0)].into_iter().collect();
@@ -827,13 +938,11 @@ fn test_account_grow() {
// lots of writes, but they are all duplicates
for i in 0..25 {
- accounts.store_for_tests(0, &[(&pubkey1, &account1)]);
+ accounts.store_for_tests((0, [(&pubkey1, &account1)].as_slice()));
let flush = pass == i + 2;
if flush {
accounts.add_root_and_flush_write_cache(0);
assert_eq!(accounts.storage.len(), 1);
- let store = &accounts.storage.get_slot_storage_entry(0).unwrap();
- assert_eq!(store.status(), status[0]);
}
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
@@ -860,14 +969,23 @@ fn test_account_grow() {
#[test]
fn test_lazy_gc_slot() {
solana_logger::setup();
- //This test is pedantic
- //A slot is purged when a non root bank is cleaned up. If a slot is behind root but it is
- //not root, it means we are retaining dead banks.
- let accounts = AccountsDb::new_single_for_tests();
+
+ // Only run this test with mark obsolete accounts disabled as garbage collection
+ // is not lazy with mark obsolete accounts enabled
+ let accounts = AccountsDb::new_with_config(
+ Vec::new(),
+ AccountsDbConfig {
+ mark_obsolete_accounts: MarkObsoleteAccounts::Disabled,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ None,
+ Arc::default(),
+ );
+
let pubkey = solana_pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
//store an account
- accounts.store_for_tests(0, &[(&pubkey, &account)]);
+ accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
accounts.add_root_and_flush_write_cache(0);
let ancestors = vec![(0, 0)].into_iter().collect();
@@ -886,7 +1004,7 @@ fn test_lazy_gc_slot() {
assert_eq!(accounts.storage.get_slot_storage_entry(0).unwrap().id(), id);
//store causes clean
- accounts.store_for_tests(1, &[(&pubkey, &account)]);
+ accounts.store_for_tests((1, [(&pubkey, &account)].as_slice()));
//slot is gone
accounts.print_accounts_stats("pre-clean");
@@ -914,8 +1032,8 @@ fn test_clean_zero_lamport_and_dead_slot() {
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store two accounts
- accounts.store_for_tests(0, &[(&pubkey1, &account)]);
- accounts.store_for_tests(0, &[(&pubkey2, &account)]);
+ accounts.store_for_tests((0, [(&pubkey1, &account)].as_slice()));
+ accounts.store_for_tests((0, [(&pubkey2, &account)].as_slice()));
// Make sure both accounts are in the same AppendVec in slot 0, which
// will prevent pubkey1 from being cleaned up later even when it's a
@@ -950,10 +1068,10 @@ fn test_clean_zero_lamport_and_dead_slot() {
);
// Update account 1 in slot 1
- accounts.store_for_tests(1, &[(&pubkey1, &account)]);
+ accounts.store_for_tests((1, [(&pubkey1, &account)].as_slice()));
// Update account 1 as zero lamports account
- accounts.store_for_tests(2, &[(&pubkey1, &zero_lamport_account)]);
+ accounts.store_for_tests((2, [(&pubkey1, &zero_lamport_account)].as_slice()));
// Pubkey 1 was the only account in slot 1, and it was updated in slot 2, so
// slot 1 should be purged
@@ -983,49 +1101,58 @@ fn test_clean_dead_slot_with_obsolete_accounts() {
// Obsolete accounts are already unreffed so they should not be unreffed again
- let accounts = AccountsDb::new_single_for_tests();
+ let accounts = AccountsDb::new_with_config(
+ Vec::new(),
+ AccountsDbConfig {
+ mark_obsolete_accounts: MarkObsoleteAccounts::Enabled,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ None,
+ Arc::default(),
+ );
+
let pubkey = solana_pubkey::new_rand();
+ let pubkey2 = solana_pubkey::new_rand();
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
+ let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
+ accounts.set_latest_full_snapshot_slot(2);
- // Store account 1 in slot 0
- accounts.store_for_tests(0, &[(&pubkey, &account)]);
+ // Store pubkey1 and pubkey2 in slot 0
+ accounts.store_for_tests((0, [(&pubkey, &account), (&pubkey2, &account)].as_slice()));
- // Update account 1 as in slot 1
- accounts.store_for_tests(1, &[(&pubkey, &account)]);
+ // Update pubkey1 and make pubkey2 a zero lamport account in slot 1
+ accounts.store_for_tests((
+ 1,
+ [(&pubkey, &account), (&pubkey2, &zero_lamport_account)].as_slice(),
+ ));
- // Update account 1 as in slot 2
- accounts.store_for_tests(2, &[(&pubkey, &account)]);
+ // Update pubkey1 as in slot 2
+ accounts.store_for_tests((2, [(&pubkey, &account)].as_slice()));
// Flush the slots individually to avoid reclaims
accounts.add_root_and_flush_write_cache(0);
accounts.add_root_and_flush_write_cache(1);
accounts.add_root_and_flush_write_cache(2);
- // Pubkey1 should be in 3 slots, 0 and 1 and 2
- assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey), 3);
+ // Slot 1 should not be removed as it has the zero lamport account
+ assert!(accounts.storage.get_slot_storage_entry(1).is_some());
+ let slot = accounts.storage.get_slot_storage_entry(1).unwrap();
- // Mark pubkey in slot 1 as obsolete, simulating obsolete accounts being enabled
- let old_storage = accounts
- .storage
- .get_slot_storage_entry_shrinking_in_progress_ok(1)
- .unwrap();
- old_storage.mark_accounts_obsolete(vec![(0, 1)].into_iter(), 2);
+ // Ensure that slot1 also still contains the obsolete account
+ assert_eq!(slot.get_obsolete_accounts(None).len(), 1);
- // Unreference pubkey, which would occur during the normal mark_accounts_obsolete flow
- accounts.unref_pubkeys([pubkey].iter(), 1, &HashSet::new());
+ // Ref count for pubkey1 should be 1 as obsolete accounts are enabled
+ accounts.assert_ref_count(&pubkey, 1);
- // Pubkey1 should now have two references: Slot0 and Slot2.
- assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey), 2);
-
- // Clean, remove slot0/1.
+ // Clean, which will remove slot1
accounts.clean_accounts_for_tests();
+
assert!(accounts.storage.get_slot_storage_entry(0).is_none());
assert!(accounts.storage.get_slot_storage_entry(1).is_none());
- // Ref count for pubkey should be 1. It was decremented for slot1 and above, and decremented
- // for slot0 during clean_accounts_for_tests
- // It was NOT decremented for slot1 during clean_accounts_for_test as it was marked obsolete
- assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey), 1);
+ // Ref count for pubkey should be 1. It was NOT decremented during clean_accounts_for_tests
+ // despite slot 1 being removed, because the account was already obsolete
+ accounts.assert_ref_count(&pubkey, 1);
}
#[test]
@@ -1038,11 +1165,17 @@ fn test_remove_zero_lamport_multi_ref_accounts_panic() {
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot = 1;
- accounts.store_for_tests(slot, &[(&pubkey_zero, &one_lamport_account)]);
- accounts.add_root_and_flush_write_cache(slot);
+ accounts.store_for_tests((slot, [(&pubkey_zero, &one_lamport_account)].as_slice()));
+
+ // Flush without cleaning to avoid reclaiming pubkey_zero early
+ accounts.add_root(1);
+ accounts.flush_rooted_accounts_cache(Some(slot), false);
+
+ accounts.store_for_tests((slot + 1, [(&pubkey_zero, &zero_lamport_account)].as_slice()));
- accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]);
- accounts.add_root_and_flush_write_cache(slot + 1);
+ // Flush without cleaning to avoid reclaiming pubkey_zero early
+ accounts.add_root(2);
+ accounts.flush_rooted_accounts_cache(Some(slot + 1), false);
// This should panic because there are 2 refs for pubkey_zero.
accounts.remove_zero_lamport_single_ref_accounts_after_shrink(
@@ -1064,20 +1197,25 @@ fn test_remove_zero_lamport_single_ref_accounts_after_shrink() {
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot = 1;
- accounts.store_for_tests(
+ accounts.store_for_tests((
slot,
- &[(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)],
- );
+ [(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)].as_slice(),
+ ));
// Simulate rooting the zero-lamport account, writes it to storage
accounts.add_root_and_flush_write_cache(slot);
if pass > 0 {
// store in write cache
- accounts.store_for_tests(slot + 1, &[(&pubkey_zero, &zero_lamport_account)]);
+ accounts
+ .store_for_tests((slot + 1, [(&pubkey_zero, &zero_lamport_account)].as_slice()));
if pass == 2 {
- // move to a storage (causing ref count to increase)
- accounts.add_root_and_flush_write_cache(slot + 1);
+ // This test pass is still relevant with obsolete accounts enabled, but can be
+ // removed if all scenarios where flush_write_cache doesn't clean are eliminated.
+
+ // add root and flush without clean (causing ref count to increase)
+ accounts.add_root(slot + 1);
+ accounts.flush_rooted_accounts_cache(None, false);
}
}
@@ -1182,10 +1320,10 @@ fn test_shrink_zero_lamport_single_ref_account() {
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot = 1;
// Store a zero-lamport account and a non-zero lamport account
- accounts.store_for_tests(
+ accounts.store_for_tests((
slot,
- &[(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)],
- );
+ [(&pubkey_zero, &zero_lamport_account), (&pubkey2, &account)].as_slice(),
+ ));
// Simulate rooting the zero-lamport account, should be a
// candidate for cleaning
@@ -1260,11 +1398,15 @@ fn test_clean_multiple_zero_lamport_decrements_index_ref_count() {
let pubkey2 = solana_pubkey::new_rand();
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
+ // If there is no latest full snapshot, zero lamport accounts can be cleaned and removed
+ // immediately. Set latest full snapshot slot to zero to avoid cleaning zero lamport accounts
+ accounts.set_latest_full_snapshot_slot(0);
+
// Store 2 accounts in slot 0, then update account 1 in two more slots
- accounts.store_for_tests(0, &[(&pubkey1, &zero_lamport_account)]);
- accounts.store_for_tests(0, &[(&pubkey2, &zero_lamport_account)]);
- accounts.store_for_tests(1, &[(&pubkey1, &zero_lamport_account)]);
- accounts.store_for_tests(2, &[(&pubkey1, &zero_lamport_account)]);
+ accounts.store_for_tests((0, [(&pubkey1, &zero_lamport_account)].as_slice()));
+ accounts.store_for_tests((0, [(&pubkey2, &zero_lamport_account)].as_slice()));
+ accounts.store_for_tests((1, [(&pubkey1, &zero_lamport_account)].as_slice()));
+ accounts.store_for_tests((2, [(&pubkey1, &zero_lamport_account)].as_slice()));
// Root all slots
accounts.add_root_and_flush_write_cache(0);
accounts.add_root_and_flush_write_cache(1);
@@ -1272,8 +1414,8 @@ fn test_clean_multiple_zero_lamport_decrements_index_ref_count() {
// Account ref counts should match how many slots they were stored in
// Account 1 = 3 slots; account 2 = 1 slot
- assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 3);
- assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 1);
+ accounts.assert_ref_count(&pubkey1, 3);
+ accounts.assert_ref_count(&pubkey2, 1);
accounts.clean_accounts_for_tests();
// Slots 0 and 1 should each have been cleaned because all of their
@@ -1286,13 +1428,15 @@ fn test_clean_multiple_zero_lamport_decrements_index_ref_count() {
// Index ref counts should be consistent with the slot stores. Account 1 ref count
// should be 1 since slot 2 is the only alive slot; account 2 should have a ref
// count of 0 due to slot 0 being dead
- assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 1);
- assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 0);
+ accounts.assert_ref_count(&pubkey1, 1);
+ accounts.assert_ref_count(&pubkey2, 0);
+ // Allow clean to clean any zero lamports up to and including slot 2
+ accounts.set_latest_full_snapshot_slot(2);
accounts.clean_accounts_for_tests();
// Slot 2 will now be cleaned, which will leave account 1 with a ref count of 0
assert!(accounts.storage.get_slot_storage_entry(2).is_none());
- assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0);
+ accounts.assert_ref_count(&pubkey1, 0);
}
#[test]
@@ -1305,8 +1449,8 @@ fn test_clean_zero_lamport_and_old_roots() {
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store a zero-lamport account
- accounts.store_for_tests(0, &[(&pubkey, &account)]);
- accounts.store_for_tests(1, &[(&pubkey, &zero_lamport_account)]);
+ accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
+ accounts.store_for_tests((1, [(&pubkey, &zero_lamport_account)].as_slice()));
// Simulate rooting the zero-lamport account, should be a
// candidate for cleaning
@@ -1334,71 +1478,106 @@ fn test_clean_zero_lamport_and_old_roots() {
assert!(!accounts.accounts_index.contains_with(&pubkey, None, None));
}
-#[test]
-fn test_clean_old_with_normal_account() {
+#[test_case(MarkObsoleteAccounts::Enabled)]
+#[test_case(MarkObsoleteAccounts::Disabled)]
+fn test_clean_old_with_normal_account(mark_obsolete_accounts: MarkObsoleteAccounts) {
solana_logger::setup();
- let accounts = AccountsDb::new_single_for_tests();
+ let accounts = AccountsDb::new_with_config(
+ Vec::new(),
+ AccountsDbConfig {
+ mark_obsolete_accounts,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ None,
+ Arc::default(),
+ );
+
let pubkey = solana_pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
//store an account
- accounts.store_for_tests(0, &[(&pubkey, &account)]);
- accounts.store_for_tests(1, &[(&pubkey, &account)]);
+ accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
+ accounts.store_for_tests((1, [(&pubkey, &account)].as_slice()));
// simulate slots are rooted after while
accounts.add_root_and_flush_write_cache(0);
accounts.add_root_and_flush_write_cache(1);
- //even if rooted, old state isn't cleaned up
- assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
- accounts.clean_accounts_for_tests();
+ // With obsolete accounts enabled, slot 0 is cleaned during flush
+ if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled {
+ assert_eq!(accounts.alive_account_count_in_slot(0), 1);
+ accounts.clean_accounts_for_tests();
+ }
//now old state is cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
}
-#[test]
-fn test_clean_old_with_zero_lamport_account() {
+#[test_case(MarkObsoleteAccounts::Enabled)]
+#[test_case(MarkObsoleteAccounts::Disabled)]
+fn test_clean_old_with_zero_lamport_account(mark_obsolete_accounts: MarkObsoleteAccounts) {
solana_logger::setup();
- let accounts = AccountsDb::new_single_for_tests();
+ let accounts = AccountsDb::new_with_config(
+ Vec::new(),
+ AccountsDbConfig {
+ mark_obsolete_accounts,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ None,
+ Arc::default(),
+ );
let pubkey1 = solana_pubkey::new_rand();
let pubkey2 = solana_pubkey::new_rand();
let normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
//store an account
- accounts.store_for_tests(0, &[(&pubkey1, &normal_account)]);
- accounts.store_for_tests(1, &[(&pubkey1, &zero_account)]);
- accounts.store_for_tests(0, &[(&pubkey2, &normal_account)]);
- accounts.store_for_tests(1, &[(&pubkey2, &normal_account)]);
+ accounts.store_for_tests((0, [(&pubkey1, &normal_account)].as_slice()));
+ accounts.store_for_tests((1, [(&pubkey1, &zero_account)].as_slice()));
+ accounts.store_for_tests((0, [(&pubkey2, &normal_account)].as_slice()));
+ accounts.store_for_tests((1, [(&pubkey2, &normal_account)].as_slice()));
//simulate slots are rooted after while
accounts.add_root_and_flush_write_cache(0);
accounts.add_root_and_flush_write_cache(1);
- //even if rooted, old state isn't cleaned up
- assert_eq!(accounts.alive_account_count_in_slot(0), 2);
assert_eq!(accounts.alive_account_count_in_slot(1), 2);
accounts.print_accounts_stats("");
- accounts.clean_accounts_for_tests();
+ // With obsolete accounts enabled, slot 0 is cleaned during flush
+ if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled {
+ // even if rooted, old state isn't cleaned up
+ assert_eq!(accounts.alive_account_count_in_slot(0), 2);
+ accounts.clean_accounts_for_tests();
+ }
//Old state behind zero-lamport account is cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 2);
}
-#[test]
-fn test_clean_old_with_both_normal_and_zero_lamport_accounts() {
+#[test_case(MarkObsoleteAccounts::Enabled)]
+#[test_case(MarkObsoleteAccounts::Disabled)]
+fn test_clean_old_with_both_normal_and_zero_lamport_accounts(
+ mark_obsolete_accounts: MarkObsoleteAccounts,
+) {
solana_logger::setup();
let mut accounts = AccountsDb {
account_indexes: spl_token_mint_index_enabled(),
- ..AccountsDb::new_single_for_tests()
+ ..AccountsDb::new_with_config(
+ Vec::new(),
+ AccountsDbConfig {
+ mark_obsolete_accounts,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ None,
+ Arc::default(),
+ )
};
let pubkey1 = solana_pubkey::new_rand();
let pubkey2 = solana_pubkey::new_rand();
@@ -1418,19 +1597,24 @@ fn test_clean_old_with_both_normal_and_zero_lamport_accounts() {
zero_account.set_data(account_data_with_mint);
//store an account
- accounts.store_for_tests(0, &[(&pubkey1, &normal_account)]);
- accounts.store_for_tests(0, &[(&pubkey1, &normal_account)]);
- accounts.store_for_tests(1, &[(&pubkey1, &zero_account)]);
- accounts.store_for_tests(0, &[(&pubkey2, &normal_account)]);
- accounts.store_for_tests(2, &[(&pubkey2, &normal_account)]);
+ accounts.store_for_tests((0, [(&pubkey1, &normal_account)].as_slice()));
+ accounts.store_for_tests((0, [(&pubkey1, &normal_account)].as_slice()));
+ accounts.store_for_tests((1, [(&pubkey1, &zero_account)].as_slice()));
+ accounts.store_for_tests((0, [(&pubkey2, &normal_account)].as_slice()));
+ accounts.store_for_tests((2, [(&pubkey2, &normal_account)].as_slice()));
//simulate slots are rooted after while
accounts.add_root_and_flush_write_cache(0);
accounts.add_root_and_flush_write_cache(1);
accounts.add_root_and_flush_write_cache(2);
- //even if rooted, old state isn't cleaned up
- assert_eq!(accounts.alive_account_count_in_slot(0), 2);
+ if mark_obsolete_accounts == MarkObsoleteAccounts::Enabled {
+ // With obsolete accounts enabled, slot 0 is cleaned during flush
+ assert_eq!(accounts.alive_account_count_in_slot(0), 0);
+ } else {
+ //even if rooted, old state isn't cleaned up
+ assert_eq!(accounts.alive_account_count_in_slot(0), 2);
+ }
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
assert_eq!(accounts.alive_account_count_in_slot(2), 1);
@@ -1529,30 +1713,43 @@ fn test_clean_old_with_both_normal_and_zero_lamport_accounts() {
assert_eq!(found_accounts, vec![pubkey2]);
}
-#[test]
-fn test_clean_max_slot_zero_lamport_account() {
+#[test_case(MarkObsoleteAccounts::Enabled)]
+#[test_case(MarkObsoleteAccounts::Disabled)]
+fn test_clean_max_slot_zero_lamport_account(mark_obsolete_accounts: MarkObsoleteAccounts) {
solana_logger::setup();
- let accounts = AccountsDb::new_single_for_tests();
+ let accounts = AccountsDb::new_with_config(
+ Vec::new(),
+ AccountsDbConfig {
+ mark_obsolete_accounts,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ None,
+ Arc::default(),
+ );
let pubkey = solana_pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// store an account, make it a zero lamport account
// in slot 1
- accounts.store_for_tests(0, &[(&pubkey, &account)]);
- accounts.store_for_tests(1, &[(&pubkey, &zero_account)]);
+ accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
+ accounts.store_for_tests((1, [(&pubkey, &zero_account)].as_slice()));
// simulate slots are rooted after while
accounts.add_root_and_flush_write_cache(0);
accounts.add_root_and_flush_write_cache(1);
- // Only clean up to account 0, should not purge slot 0 based on
- // updates in later slots in slot 1
- assert_eq!(accounts.alive_account_count_in_slot(0), 1);
- assert_eq!(accounts.alive_account_count_in_slot(1), 1);
- accounts.clean_accounts(Some(0), false, &EpochSchedule::default());
- assert_eq!(accounts.alive_account_count_in_slot(0), 1);
+ // Clean is performed as part of flush with obsolete accounts marked, so explicit clean isn't needed
+ if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled {
+ // Only clean up to account 0, should not purge slot 0 based on
+ // updates in later slots in slot 1
+ assert_eq!(accounts.alive_account_count_in_slot(0), 1);
+ assert_eq!(accounts.alive_account_count_in_slot(1), 1);
+ accounts.clean_accounts(Some(0), false, &EpochSchedule::default());
+ assert_eq!(accounts.alive_account_count_in_slot(0), 1);
+ }
+
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
assert!(accounts.accounts_index.contains_with(&pubkey, None, None));
@@ -1590,12 +1787,16 @@ fn test_accounts_db_purge_keep_live() {
let accounts = AccountsDb::new_single_for_tests();
accounts.add_root_and_flush_write_cache(0);
+ // If there is no latest full snapshot, zero lamport accounts can be cleaned and removed
+ // immediately. Set latest full snapshot slot to zero to avoid cleaning zero lamport accounts
+ accounts.set_latest_full_snapshot_slot(0);
+
// Step A
let mut current_slot = 1;
- accounts.store_for_tests(current_slot, &[(&pubkey, &account)]);
+ accounts.store_for_tests((current_slot, [(&pubkey, &account)].as_slice()));
// Store another live account to slot 1 which will prevent any purge
// since the store count will not be zero
- accounts.store_for_tests(current_slot, &[(&pubkey2, &account2)]);
+ accounts.store_for_tests((current_slot, [(&pubkey2, &account2)].as_slice()));
accounts.add_root_and_flush_write_cache(current_slot);
let (slot1, account_info1) = accounts
.accounts_index
@@ -1616,7 +1817,7 @@ fn test_accounts_db_purge_keep_live() {
// Step B
current_slot += 1;
let zero_lamport_slot = current_slot;
- accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]);
+ accounts.store_for_tests((current_slot, [(&pubkey, &zero_lamport_account)].as_slice()));
accounts.add_root_and_flush_write_cache(current_slot);
accounts.assert_load_account(current_slot, pubkey, zero_lamport);
@@ -1640,7 +1841,7 @@ fn test_accounts_db_purge_keep_live() {
// Zero lamport entry was not the one purged
assert_eq!(index_slot, zero_lamport_slot);
// The ref count should still be 2 because no slots were purged
- assert_eq!(accounts.ref_count_for_pubkey(&pubkey), 2);
+ accounts.assert_ref_count(&pubkey, 2);
// storage for slot 1 had 2 accounts, now has 1 after pubkey 1
// was reclaimed
@@ -1666,11 +1867,11 @@ fn test_accounts_db_purge1() {
accounts.add_root(0);
let mut current_slot = 1;
- accounts.store_for_tests(current_slot, &[(&pubkey, &account)]);
+ accounts.store_for_tests((current_slot, [(&pubkey, &account)].as_slice()));
accounts.add_root_and_flush_write_cache(current_slot);
current_slot += 1;
- accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]);
+ accounts.store_for_tests((current_slot, [(&pubkey, &zero_lamport_account)].as_slice()));
accounts.add_root_and_flush_write_cache(current_slot);
accounts.assert_load_account(current_slot, pubkey, zero_lamport);
@@ -1728,7 +1929,7 @@ fn test_store_account_stress() {
loop {
let account_bal = thread_rng().gen_range(1..99);
account.set_lamports(account_bal);
- db.store_for_tests(slot, &[(&pubkey, &account)]);
+ db.store_for_tests((slot, [(&pubkey, &account)].as_slice()));
let (account, slot) = db
.load_without_fixed_root(&Ancestors::default(), &pubkey)
@@ -1757,34 +1958,40 @@ fn test_accountsdb_scan_accounts() {
let key0 = solana_pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &key);
- db.store_for_tests(0, &[(&key0, &account0)]);
+ db.store_for_tests((0, [(&key0, &account0)].as_slice()));
let key1 = solana_pubkey::new_rand();
let account1 = AccountSharedData::new(2, 0, &key);
- db.store_for_tests(1, &[(&key1, &account1)]);
+ db.store_for_tests((1, [(&key1, &account1)].as_slice()));
let ancestors = vec![(0, 0)].into_iter().collect();
let mut accounts = Vec::new();
- db.unchecked_scan_accounts(
- "",
+ db.scan_accounts(
&ancestors,
- |_, account, _| {
- accounts.push(account.take_account());
+ 0,
+ |scan_result| {
+ if let Some((_, account, _)) = scan_result {
+ accounts.push(account);
+ }
},
&ScanConfig::default(),
- );
+ )
+ .expect("should scan accounts");
assert_eq!(accounts, vec![account0]);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
let mut accounts = Vec::new();
- db.unchecked_scan_accounts(
- "",
+ db.scan_accounts(
&ancestors,
- |_, account, _| {
- accounts.push(account.take_account());
+ 0,
+ |scan_result| {
+ if let Some((_, account, _)) = scan_result {
+ accounts.push(account);
+ }
},
&ScanConfig::default(),
- );
+ )
+ .expect("should scan accounts");
assert_eq!(accounts.len(), 2);
}
@@ -1797,20 +2004,20 @@ fn test_cleanup_key_not_removed() {
let key0 = solana_pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &key);
- db.store_for_tests(0, &[(&key0, &account0)]);
+ db.store_for_tests((0, [(&key0, &account0)].as_slice()));
let key1 = solana_pubkey::new_rand();
let account1 = AccountSharedData::new(2, 0, &key);
- db.store_for_tests(1, &[(&key1, &account1)]);
+ db.store_for_tests((1, [(&key1, &account1)].as_slice()));
db.print_accounts_stats("pre");
let slots: HashSet = vec![1].into_iter().collect();
let purge_keys = [(key1, slots)];
- let _ = db.purge_keys_exact(purge_keys.iter());
+ let _ = db.purge_keys_exact(purge_keys);
let account2 = AccountSharedData::new(3, 0, &key);
- db.store_for_tests(2, &[(&key1, &account2)]);
+ db.store_for_tests((2, [(&key1, &account2)].as_slice()));
db.print_accounts_stats("post");
let ancestors = vec![(2, 0)].into_iter().collect();
@@ -1832,7 +2039,7 @@ fn test_store_large_account() {
let data_len = DEFAULT_FILE_SIZE as usize + 7;
let account = AccountSharedData::new(1, data_len, &key);
- db.store_for_tests(0, &[(&key, &account)]);
+ db.store_for_tests((0, [(&key, &account)].as_slice()));
let ancestors = vec![(0, 0)].into_iter().collect();
let ret = db.load_without_fixed_root(&ancestors, &key).unwrap();
@@ -1876,10 +2083,6 @@ fn test_stored_readable_account() {
assert!(accounts_equal(&account, &stored_account));
}
-/// A place holder stored size for a cached entry. We don't need to store the size for cached entries, but we have to pass something.
-/// stored size is only used for shrinking. We don't shrink items in the write cache.
-const CACHE_VIRTUAL_STORED_SIZE: StoredSize = 0;
-
#[test]
fn test_hash_stored_account() {
// Number are just sequential.
@@ -1911,7 +2114,7 @@ fn test_hash_stored_account() {
account_meta: &account_meta,
data: &data,
offset,
- stored_size: CACHE_VIRTUAL_STORED_SIZE as usize,
+ stored_size: 0,
};
let account = stored_account.to_account_shared_data();
@@ -1952,7 +2155,7 @@ fn test_verify_bank_capitalization() {
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
- db.store_for_tests(some_slot, &[(&key, &account)]);
+ db.store_for_tests((some_slot, [(&key, &account)].as_slice()));
if pass == 0 {
db.add_root_and_flush_write_cache(some_slot);
@@ -1964,13 +2167,14 @@ fn test_verify_bank_capitalization() {
}
let native_account_pubkey = solana_pubkey::new_rand();
- db.store_for_tests(
+ db.store_for_tests((
some_slot,
- &[(
+ [(
&native_account_pubkey,
&create_loadable_account_for_test("foo"),
- )],
- );
+ )]
+ .as_slice(),
+ ));
db.add_root_and_flush_write_cache(some_slot);
assert_eq!(
@@ -1992,7 +2196,7 @@ fn test_storage_finder() {
let account = AccountSharedData::new(lamports, data_len, &solana_pubkey::new_rand());
// pre-populate with a smaller empty store
db.create_and_insert_store(1, 8192, "test_storage_finder");
- db.store_for_tests(1, &[(&key, &account)]);
+ db.store_for_tests((1, [(&key, &account)].as_slice()));
}
#[test]
@@ -2011,7 +2215,7 @@ fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() {
let base_slot = before_slot + 1;
let after_slot = base_slot + 1;
- db.store_for_tests(base_slot, &[(&key, &account)]);
+ db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
db.add_root_and_flush_write_cache(base_slot);
assert!(db.get_storages(..=before_slot).0.is_empty());
@@ -2029,7 +2233,7 @@ fn test_get_snapshot_storages_only_non_empty() {
let base_slot = 0;
let after_slot = base_slot + 1;
- db.store_for_tests(base_slot, &[(&key, &account)]);
+ db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
if pass == 0 {
db.add_root_and_flush_write_cache(base_slot);
db.storage.remove(&base_slot, false);
@@ -2037,7 +2241,7 @@ fn test_get_snapshot_storages_only_non_empty() {
continue;
}
- db.store_for_tests(base_slot, &[(&key, &account)]);
+ db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
db.add_root_and_flush_write_cache(base_slot);
assert_eq!(1, db.get_storages(..=after_slot).0.len());
}
@@ -2052,7 +2256,7 @@ fn test_get_snapshot_storages_only_roots() {
let base_slot = 0;
let after_slot = base_slot + 1;
- db.store_for_tests(base_slot, &[(&key, &account)]);
+ db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
assert!(db.get_storages(..=after_slot).0.is_empty());
db.add_root_and_flush_write_cache(base_slot);
@@ -2068,7 +2272,7 @@ fn test_get_snapshot_storages_exclude_empty() {
let base_slot = 0;
let after_slot = base_slot + 1;
- db.store_for_tests(base_slot, &[(&key, &account)]);
+ db.store_for_tests((base_slot, [(&key, &account)].as_slice()));
db.add_root_and_flush_write_cache(base_slot);
assert_eq!(1, db.get_storages(..=after_slot).0.len());
@@ -2087,7 +2291,7 @@ fn test_get_snapshot_storages_with_base_slot() {
let account = AccountSharedData::new(1, 0, &key);
let slot = 10;
- db.store_for_tests(slot, &[(&key, &account)]);
+ db.store_for_tests((slot, [(&key, &account)].as_slice()));
db.add_root_and_flush_write_cache(slot);
assert_eq!(0, db.get_storages(slot + 1..=slot + 1).0.len());
assert_eq!(1, db.get_storages(slot..=slot + 1).0.len());
@@ -2095,11 +2299,11 @@ fn test_get_snapshot_storages_with_base_slot() {
define_accounts_db_test!(
test_storage_remove_account_double_remove,
- panic = "double remove of account in slot: 0/store: 0!!",
+ panic = "Too many bytes or accounts removed from storage! slot: 0, id: 0",
|accounts| {
let pubkey = solana_pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
- accounts.store_for_tests(0, &[(&pubkey, &account)]);
+ accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
accounts.add_root_and_flush_write_cache(0);
let storage_entry = accounts.storage.get_slot_storage_entry(0).unwrap();
storage_entry.remove_accounts(0, 1);
@@ -2133,11 +2337,11 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si
// A: Initialize AccountsDb with pubkey1 and pubkey2
current_slot += 1;
if store1_first {
- accounts.store_for_tests(current_slot, &[(&pubkey1, &account)]);
- accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]);
+ accounts.store_for_tests((current_slot, [(&pubkey1, &account)].as_slice()));
+ accounts.store_for_tests((current_slot, [(&pubkey2, &account)].as_slice()));
} else {
- accounts.store_for_tests(current_slot, &[(&pubkey2, &account)]);
- accounts.store_for_tests(current_slot, &[(&pubkey1, &account)]);
+ accounts.store_for_tests((current_slot, [(&pubkey2, &account)].as_slice()));
+ accounts.store_for_tests((current_slot, [(&pubkey1, &account)].as_slice()));
}
accounts.add_root_and_flush_write_cache(current_slot);
@@ -2147,14 +2351,14 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si
// B: Test multiple updates to pubkey1 in a single slot/storage
current_slot += 1;
assert_eq!(0, accounts.alive_account_count_in_slot(current_slot));
- assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1));
- accounts.store_for_tests(current_slot, &[(&pubkey1, &account2)]);
- accounts.store_for_tests(current_slot, &[(&pubkey1, &account2)]);
+ accounts.assert_ref_count(&pubkey1, 1);
+ accounts.store_for_tests((current_slot, [(&pubkey1, &account2)].as_slice()));
+ accounts.store_for_tests((current_slot, [(&pubkey1, &account2)].as_slice()));
accounts.add_root_and_flush_write_cache(current_slot);
assert_eq!(1, accounts.alive_account_count_in_slot(current_slot));
// Stores to same pubkey, same slot only count once towards the
// ref count
- assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
+ accounts.assert_ref_count(&pubkey1, 2);
accounts.add_root_and_flush_write_cache(current_slot);
accounts.print_accounts_stats("Post-B pre-clean");
@@ -2166,12 +2370,12 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si
// C: more updates to trigger clean of previous updates
current_slot += 1;
- assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
- accounts.store_for_tests(current_slot, &[(&pubkey1, &account3)]);
- accounts.store_for_tests(current_slot, &[(&pubkey2, &account3)]);
- accounts.store_for_tests(current_slot, &[(&pubkey3, &account4)]);
+ accounts.assert_ref_count(&pubkey1, 2);
+ accounts.store_for_tests((current_slot, [(&pubkey1, &account3)].as_slice()));
+ accounts.store_for_tests((current_slot, [(&pubkey2, &account3)].as_slice()));
+ accounts.store_for_tests((current_slot, [(&pubkey3, &account4)].as_slice()));
accounts.add_root_and_flush_write_cache(current_slot);
- assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
+ accounts.assert_ref_count(&pubkey1, 3);
info!("post C");
@@ -2179,10 +2383,10 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si
// D: Make all keys 0-lamport, cleans all keys
current_slot += 1;
- assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
- accounts.store_for_tests(current_slot, &[(&pubkey1, &zero_lamport_account)]);
- accounts.store_for_tests(current_slot, &[(&pubkey2, &zero_lamport_account)]);
- accounts.store_for_tests(current_slot, &[(&pubkey3, &zero_lamport_account)]);
+ accounts.assert_ref_count(&pubkey1, 3);
+ accounts.store_for_tests((current_slot, [(&pubkey1, &zero_lamport_account)].as_slice()));
+ accounts.store_for_tests((current_slot, [(&pubkey2, &zero_lamport_account)].as_slice()));
+ accounts.store_for_tests((current_slot, [(&pubkey3, &zero_lamport_account)].as_slice()));
let snapshot_stores = accounts.get_storages(..=current_slot).0;
let total_accounts: usize = snapshot_stores.iter().map(|s| s.accounts_count()).sum();
@@ -2201,9 +2405,9 @@ fn do_full_clean_refcount(mut accounts: AccountsDb, store1_first: bool, store_si
assert_eq!(total_accounts, total_accounts_post_clean);
// should clean all 3 pubkeys
- assert_eq!(accounts.ref_count_for_pubkey(&pubkey1), 0);
- assert_eq!(accounts.ref_count_for_pubkey(&pubkey2), 0);
- assert_eq!(accounts.ref_count_for_pubkey(&pubkey3), 0);
+ accounts.assert_ref_count(&pubkey1, 0);
+ accounts.assert_ref_count(&pubkey2, 0);
+ accounts.assert_ref_count(&pubkey3, 0);
}
// Setup 3 scenarios which try to differentiate between pubkey1 being in an
@@ -2269,7 +2473,7 @@ fn test_shrink_candidate_slots() {
current_slot += 1;
for pubkey in &pubkeys {
- accounts.store_for_tests(current_slot, &[(pubkey, &account)]);
+ accounts.store_for_tests((current_slot, [(pubkey, &account)].as_slice()));
}
let shrink_slot = current_slot;
accounts.add_root_and_flush_write_cache(current_slot);
@@ -2279,7 +2483,7 @@ fn test_shrink_candidate_slots() {
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
- accounts.store_for_tests(current_slot, &[(pubkey, &account)]);
+ accounts.store_for_tests((current_slot, [(pubkey, &account)].as_slice()));
}
accounts.add_root_and_flush_write_cache(current_slot);
accounts.clean_accounts_for_tests();
@@ -2335,7 +2539,7 @@ fn test_shrink_candidate_slots_with_dead_ancient_account() {
.map(|(pubkey, account)| (pubkey, account))
.collect();
let starting_ancient_slot = 1;
- db.store_for_tests(starting_ancient_slot, &accounts);
+ db.store_for_tests((starting_ancient_slot, accounts.as_slice()));
db.add_root_and_flush_write_cache(starting_ancient_slot);
let storage = db.get_storage_for_slot(starting_ancient_slot).unwrap();
let ancient_accounts = db.get_unique_accounts_from_storage(&storage);
@@ -2354,10 +2558,10 @@ fn test_shrink_candidate_slots_with_dead_ancient_account() {
let ancient_append_vec_offset = db.ancient_append_vec_offset.unwrap().abs();
let current_slot = epoch_schedule.slots_per_epoch + ancient_append_vec_offset as u64 + 1;
// Simulate killing of the ancient account by overwriting it in the current slot.
- db.store_for_tests(
+ db.store_for_tests((
current_slot,
- &[(&modified_account_pubkey, &modified_account)],
- );
+ [(&modified_account_pubkey, &modified_account)].as_slice(),
+ ));
db.add_root_and_flush_write_cache(current_slot);
// This should remove the dead ancient account from the index.
db.clean_accounts_for_tests();
@@ -2405,8 +2609,9 @@ fn test_select_candidates_by_total_usage_no_candidates() {
assert_eq!(0, next_candidates.len());
}
-#[test]
-fn test_select_candidates_by_total_usage_3_way_split_condition() {
+#[test_case(StorageAccess::Mmap)]
+#[test_case(StorageAccess::File)]
+fn test_select_candidates_by_total_usage_3_way_split_condition(storage_access: StorageAccess) {
// three candidates, one selected for shrink, one is put back to the candidate list and one is ignored
solana_logger::setup();
let mut candidates = ShrinkCandidates::default();
@@ -2422,6 +2627,7 @@ fn test_select_candidates_by_total_usage_3_way_split_condition() {
store1_slot as AccountsFileId,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
db.storage.insert(store1_slot, Arc::clone(&store1));
store1.alive_bytes.store(0, Ordering::Release);
@@ -2434,6 +2640,7 @@ fn test_select_candidates_by_total_usage_3_way_split_condition() {
store2_slot as AccountsFileId,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
db.storage.insert(store2_slot, Arc::clone(&store2));
store2
@@ -2448,6 +2655,7 @@ fn test_select_candidates_by_total_usage_3_way_split_condition() {
store3_slot as AccountsFileId,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
db.storage.insert(store3_slot, Arc::clone(&store3));
store3
@@ -2468,8 +2676,9 @@ fn test_select_candidates_by_total_usage_3_way_split_condition() {
assert!(next_candidates.contains(&store2_slot));
}
-#[test]
-fn test_select_candidates_by_total_usage_2_way_split_condition() {
+#[test_case(StorageAccess::Mmap)]
+#[test_case(StorageAccess::File)]
+fn test_select_candidates_by_total_usage_2_way_split_condition(storage_access: StorageAccess) {
// three candidates, 2 are selected for shrink, one is ignored
solana_logger::setup();
let db = AccountsDb::new_single_for_tests();
@@ -2485,6 +2694,7 @@ fn test_select_candidates_by_total_usage_2_way_split_condition() {
store1_slot as AccountsFileId,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
db.storage.insert(store1_slot, Arc::clone(&store1));
store1.alive_bytes.store(0, Ordering::Release);
@@ -2497,6 +2707,7 @@ fn test_select_candidates_by_total_usage_2_way_split_condition() {
store2_slot as AccountsFileId,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
db.storage.insert(store2_slot, Arc::clone(&store2));
store2
@@ -2511,6 +2722,7 @@ fn test_select_candidates_by_total_usage_2_way_split_condition() {
store3_slot as AccountsFileId,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
db.storage.insert(store3_slot, Arc::clone(&store3));
store3
@@ -2528,8 +2740,9 @@ fn test_select_candidates_by_total_usage_2_way_split_condition() {
assert_eq!(0, next_candidates.len());
}
-#[test]
-fn test_select_candidates_by_total_usage_all_clean() {
+#[test_case(StorageAccess::Mmap)]
+#[test_case(StorageAccess::File)]
+fn test_select_candidates_by_total_usage_all_clean(storage_access: StorageAccess) {
// 2 candidates, they must be selected to achieve the target alive ratio
solana_logger::setup();
let db = AccountsDb::new_single_for_tests();
@@ -2545,6 +2758,7 @@ fn test_select_candidates_by_total_usage_all_clean() {
store1_slot as AccountsFileId,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
db.storage.insert(store1_slot, Arc::clone(&store1));
store1
@@ -2559,6 +2773,7 @@ fn test_select_candidates_by_total_usage_all_clean() {
store2_slot as AccountsFileId,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
db.storage.insert(store2_slot, Arc::clone(&store2));
store2
@@ -2576,8 +2791,6 @@ fn test_select_candidates_by_total_usage_all_clean() {
assert_eq!(0, next_candidates.len());
}
-const UPSERT_POPULATE_RECLAIMS: UpsertReclaim = UpsertReclaim::PopulateReclaims;
-
#[test]
fn test_delete_dependencies() {
solana_logger::setup();
@@ -2589,7 +2802,7 @@ fn test_delete_dependencies() {
let info1 = AccountInfo::new(StorageLocation::AppendVec(1, 0), true);
let info2 = AccountInfo::new(StorageLocation::AppendVec(2, 0), true);
let info3 = AccountInfo::new(StorageLocation::AppendVec(3, 0), true);
- let mut reclaims = vec![];
+ let mut reclaims = ReclaimsSlotList::new();
accounts_index.upsert(
0,
0,
@@ -2598,7 +2811,7 @@ fn test_delete_dependencies() {
&AccountSecondaryIndexes::default(),
info0,
&mut reclaims,
- UPSERT_POPULATE_RECLAIMS,
+ UpsertReclaim::IgnoreReclaims,
);
accounts_index.upsert(
1,
@@ -2608,7 +2821,7 @@ fn test_delete_dependencies() {
&AccountSecondaryIndexes::default(),
info1,
&mut reclaims,
- UPSERT_POPULATE_RECLAIMS,
+ UpsertReclaim::IgnoreReclaims,
);
accounts_index.upsert(
1,
@@ -2618,7 +2831,7 @@ fn test_delete_dependencies() {
&AccountSecondaryIndexes::default(),
info1,
&mut reclaims,
- UPSERT_POPULATE_RECLAIMS,
+ UpsertReclaim::IgnoreReclaims,
);
accounts_index.upsert(
2,
@@ -2628,7 +2841,7 @@ fn test_delete_dependencies() {
&AccountSecondaryIndexes::default(),
info2,
&mut reclaims,
- UPSERT_POPULATE_RECLAIMS,
+ UpsertReclaim::IgnoreReclaims,
);
accounts_index.upsert(
2,
@@ -2638,7 +2851,7 @@ fn test_delete_dependencies() {
&AccountSecondaryIndexes::default(),
info2,
&mut reclaims,
- UPSERT_POPULATE_RECLAIMS,
+ UpsertReclaim::IgnoreReclaims,
);
accounts_index.upsert(
3,
@@ -2648,7 +2861,7 @@ fn test_delete_dependencies() {
&AccountSecondaryIndexes::default(),
info3,
&mut reclaims,
- UPSERT_POPULATE_RECLAIMS,
+ UpsertReclaim::IgnoreReclaims,
);
accounts_index.add_root(0);
accounts_index.add_root(1);
@@ -2735,7 +2948,7 @@ fn test_store_overhead() {
let accounts = AccountsDb::new_single_for_tests();
let account = AccountSharedData::default();
let pubkey = solana_pubkey::new_rand();
- accounts.store_for_tests(0, &[(&pubkey, &account)]);
+ accounts.store_for_tests((0, [(&pubkey, &account)].as_slice()));
accounts.add_root_and_flush_write_cache(0);
let store = accounts.storage.get_slot_storage_entry(0).unwrap();
let total_len = store.accounts.len();
@@ -2751,13 +2964,13 @@ fn test_store_clean_after_shrink() {
let account = AccountSharedData::new(1, 16 * 4096, &Pubkey::default());
let pubkey1 = solana_pubkey::new_rand();
- accounts.store_cached((0, &[(&pubkey1, &account)][..]));
+ accounts.store_for_tests((0, &[(&pubkey1, &account)][..]));
let pubkey2 = solana_pubkey::new_rand();
- accounts.store_cached((0, &[(&pubkey2, &account)][..]));
+ accounts.store_for_tests((0, &[(&pubkey2, &account)][..]));
let zero_account = AccountSharedData::new(0, 1, &Pubkey::default());
- accounts.store_cached((1, &[(&pubkey1, &zero_account)][..]));
+ accounts.store_for_tests((1, &[(&pubkey1, &zero_account)][..]));
// Add root 0 and flush separately
accounts.add_root(0);
@@ -2782,7 +2995,7 @@ fn test_store_clean_after_shrink() {
accounts.clean_accounts_for_tests();
accounts.print_accounts_stats("post-clean");
- assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0);
+ accounts.assert_ref_count(&pubkey1, 0);
}
#[test]
@@ -2799,7 +3012,7 @@ fn test_wrapping_storage_id() {
// write unique keys to successive slots
keys.iter().enumerate().for_each(|(slot, key)| {
let slot = slot as Slot;
- db.store_for_tests(slot, &[(key, &zero_lamport_account)]);
+ db.store_for_tests((slot, [(key, &zero_lamport_account)].as_slice()));
db.add_root_and_flush_write_cache(slot);
});
assert_eq!(slots - 1, db.next_id.load(Ordering::Acquire));
@@ -2824,7 +3037,7 @@ fn test_reuse_storage_id() {
// write unique keys to successive slots
keys.iter().enumerate().for_each(|(slot, key)| {
let slot = slot as Slot;
- db.store_for_tests(slot, &[(key, &zero_lamport_account)]);
+ db.store_for_tests((slot, [(key, &zero_lamport_account)].as_slice()));
db.add_root_and_flush_write_cache(slot);
// reset next_id to what it was previously to cause us to re-use the same id
db.next_id.store(AccountsFileId::MAX, Ordering::Release);
@@ -2842,8 +3055,8 @@ fn test_zero_lamport_new_root_not_cleaned() {
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store zero lamport account into slots 0 and 1, root both slots
- db.store_for_tests(0, &[(&account_key, &zero_lamport_account)]);
- db.store_for_tests(1, &[(&account_key, &zero_lamport_account)]);
+ db.store_for_tests((0, [(&account_key, &zero_lamport_account)].as_slice()));
+ db.store_for_tests((1, [(&account_key, &zero_lamport_account)].as_slice()));
db.add_root_and_flush_write_cache(0);
db.add_root_and_flush_write_cache(1);
@@ -2863,7 +3076,7 @@ fn test_store_load_cached() {
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
let slot = 0;
- db.store_cached((slot, &[(&key, &account0)][..]));
+ db.store_for_tests((slot, &[(&key, &account0)][..]));
// Load with no ancestors and no root will return nothing
assert!(db
@@ -2895,7 +3108,7 @@ fn test_store_flush_load_cached() {
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
let slot = 0;
- db.store_cached((slot, &[(&key, &account0)][..]));
+ db.store_for_tests((slot, &[(&key, &account0)][..]));
db.mark_slot_frozen(slot);
// No root was added yet, requires an ancestor to find
@@ -2927,9 +3140,9 @@ fn test_flush_accounts_cache() {
let unrooted_key = solana_pubkey::new_rand();
let key5 = solana_pubkey::new_rand();
let key6 = solana_pubkey::new_rand();
- db.store_cached((unrooted_slot, &[(&unrooted_key, &account0)][..]));
- db.store_cached((root5, &[(&key5, &account0)][..]));
- db.store_cached((root6, &[(&key6, &account0)][..]));
+ db.store_for_tests((unrooted_slot, &[(&unrooted_key, &account0)][..]));
+ db.store_for_tests((root5, &[(&key5, &account0)][..]));
+ db.store_for_tests((root6, &[(&key6, &account0)][..]));
for slot in &[unrooted_slot, root5, root6] {
db.mark_slot_frozen(*slot);
}
@@ -2988,7 +3201,7 @@ fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize
let num_slots = 2 * max_cache_slots();
for i in 0..num_roots + num_unrooted {
let key = Pubkey::new_unique();
- db.store_cached((i as Slot, &[(&key, &account0)][..]));
+ db.store_for_tests((i as Slot, &[(&key, &account0)][..]));
keys.push(key);
db.mark_slot_frozen(i as Slot);
if i < num_roots {
@@ -3041,8 +3254,8 @@ fn test_read_only_accounts_cache() {
let account_key = Pubkey::new_unique();
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
- db.store_cached((0, &[(&account_key, &zero_lamport_account)][..]));
- db.store_cached((1, &[(&account_key, &slot1_account)][..]));
+ db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..]));
+ db.store_for_tests((1, &[(&account_key, &slot1_account)][..]));
db.add_root(0);
db.add_root(1);
@@ -3064,7 +3277,7 @@ fn test_read_only_accounts_cache() {
.unwrap();
assert_eq!(account.lamports(), 1);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
- db.store_cached((2, &[(&account_key, &zero_lamport_account)][..]));
+ db.store_for_tests((2, &[(&account_key, &zero_lamport_account)][..]));
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
let account = db
.load_with_fixed_root(&Ancestors::default(), &account_key)
@@ -3080,8 +3293,8 @@ fn test_load_with_read_only_accounts_cache() {
let account_key = Pubkey::new_unique();
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
- db.store_cached((0, &[(&account_key, &zero_lamport_account)][..]));
- db.store_cached((1, &[(&account_key, &slot1_account)][..]));
+ db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..]));
+ db.store_for_tests((1, &[(&account_key, &slot1_account)][..]));
db.add_root(0);
db.add_root(1);
@@ -3092,43 +3305,43 @@ fn test_load_with_read_only_accounts_cache() {
assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
let (account, slot) = db
- .load_account_with(&Ancestors::default(), &account_key, |_| false)
+ .load_account_with(&Ancestors::default(), &account_key, false)
.unwrap();
assert_eq!(account.lamports(), 1);
assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
assert_eq!(slot, 1);
let (account, slot) = db
- .load_account_with(&Ancestors::default(), &account_key, |_| true)
+ .load_account_with(&Ancestors::default(), &account_key, true)
.unwrap();
assert_eq!(account.lamports(), 1);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
assert_eq!(slot, 1);
- db.store_cached((2, &[(&account_key, &zero_lamport_account)][..]));
- let account = db.load_account_with(&Ancestors::default(), &account_key, |_| false);
+ db.store_for_tests((2, &[(&account_key, &zero_lamport_account)][..]));
+ let account = db.load_account_with(&Ancestors::default(), &account_key, false);
assert!(account.is_none());
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
db.read_only_accounts_cache.reset_for_tests();
assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
- let account = db.load_account_with(&Ancestors::default(), &account_key, |_| true);
+ let account = db.load_account_with(&Ancestors::default(), &account_key, true);
assert!(account.is_none());
assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
let slot2_account = AccountSharedData::new(2, 1, AccountSharedData::default().owner());
- db.store_cached((2, &[(&account_key, &slot2_account)][..]));
+ db.store_for_tests((2, &[(&account_key, &slot2_account)][..]));
let (account, slot) = db
- .load_account_with(&Ancestors::default(), &account_key, |_| false)
+ .load_account_with(&Ancestors::default(), &account_key, false)
.unwrap();
assert_eq!(account.lamports(), 2);
assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
assert_eq!(slot, 2);
let slot2_account = AccountSharedData::new(2, 1, AccountSharedData::default().owner());
- db.store_cached((2, &[(&account_key, &slot2_account)][..]));
+ db.store_for_tests((2, &[(&account_key, &slot2_account)][..]));
let (account, slot) = db
- .load_account_with(&Ancestors::default(), &account_key, |_| true)
+ .load_account_with(&Ancestors::default(), &account_key, true)
.unwrap();
assert_eq!(account.lamports(), 2);
// The account shouldn't be added to read_only_cache because it is in write_cache.
@@ -3136,100 +3349,6 @@ fn test_load_with_read_only_accounts_cache() {
assert_eq!(slot, 2);
}
-#[test]
-fn test_account_matches_owners() {
- let db = Arc::new(AccountsDb::new_single_for_tests());
-
- let owners: Vec = (0..2).map(|_| Pubkey::new_unique()).collect();
-
- let account1_key = Pubkey::new_unique();
- let account1 = AccountSharedData::new(321, 10, &owners[0]);
-
- let account2_key = Pubkey::new_unique();
- let account2 = AccountSharedData::new(1, 1, &owners[1]);
-
- let account3_key = Pubkey::new_unique();
- let account3 = AccountSharedData::new(1, 1, &Pubkey::new_unique());
-
- // Account with 0 lamports
- let account4_key = Pubkey::new_unique();
- let account4 = AccountSharedData::new(0, 1, &owners[1]);
-
- db.store_cached((0, &[(&account1_key, &account1)][..]));
- db.store_cached((1, &[(&account2_key, &account2)][..]));
- db.store_cached((2, &[(&account3_key, &account3)][..]));
- db.store_cached((3, &[(&account4_key, &account4)][..]));
-
- db.add_root(0);
- db.add_root(1);
- db.add_root(2);
- db.add_root(3);
-
- // Set the latest full snapshot slot to one that is *older* than the slot account4 is in.
- // This is required to ensure account4 is not purged during `clean`,
- // which is required to have account_matches_owners() return NoMatch.
- db.set_latest_full_snapshot_slot(2);
-
- // Flush the cache so that the account meta will be read from the storage
- db.flush_accounts_cache(true, None);
- db.clean_accounts_for_tests();
-
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &account1_key, &owners),
- Ok(0)
- );
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &account2_key, &owners),
- Ok(1)
- );
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &account3_key, &owners),
- Err(MatchAccountOwnerError::NoMatch)
- );
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &account4_key, &owners),
- Err(MatchAccountOwnerError::NoMatch)
- );
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &Pubkey::new_unique(), &owners),
- Err(MatchAccountOwnerError::UnableToLoad)
- );
-
- // Flush the cache and load account1 (so that it's in the cache)
- db.flush_accounts_cache(true, None);
- db.clean_accounts_for_tests();
- let _ = db
- .do_load(
- &Ancestors::default(),
- &account1_key,
- Some(0),
- LoadHint::Unspecified,
- LoadZeroLamports::SomeWithZeroLamportAccountForTests,
- )
- .unwrap();
-
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &account1_key, &owners),
- Ok(0)
- );
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &account2_key, &owners),
- Ok(1)
- );
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &account3_key, &owners),
- Err(MatchAccountOwnerError::NoMatch)
- );
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &account4_key, &owners),
- Err(MatchAccountOwnerError::NoMatch)
- );
- assert_eq!(
- db.account_matches_owners(&Ancestors::default(), &Pubkey::new_unique(), &owners),
- Err(MatchAccountOwnerError::UnableToLoad)
- );
-}
-
/// a test that will accept either answer
const LOAD_ZERO_LAMPORTS_ANY_TESTS: LoadZeroLamports = LoadZeroLamports::None;
@@ -3240,8 +3359,8 @@ fn test_flush_cache_clean() {
let account_key = Pubkey::new_unique();
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
- db.store_cached((0, &[(&account_key, &zero_lamport_account)][..]));
- db.store_cached((1, &[(&account_key, &slot1_account)][..]));
+ db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..]));
+ db.store_for_tests((1, &[(&account_key, &slot1_account)][..]));
db.add_root(0);
db.add_root(1);
@@ -3276,9 +3395,22 @@ fn test_flush_cache_clean() {
.is_none());
}
-#[test]
-fn test_flush_cache_dont_clean_zero_lamport_account() {
- let db = Arc::new(AccountsDb::new_single_for_tests());
+#[test_case(MarkObsoleteAccounts::Enabled)]
+#[test_case(MarkObsoleteAccounts::Disabled)]
+fn test_flush_cache_dont_clean_zero_lamport_account(mark_obsolete_accounts: MarkObsoleteAccounts) {
+ let db = AccountsDb::new_with_config(
+ Vec::new(),
+ AccountsDbConfig {
+ mark_obsolete_accounts,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ None,
+ Arc::default(),
+ );
+
+ // If there is no latest full snapshot, zero lamport accounts can be cleaned and removed
+ // immediately. Set latest full snapshot slot to zero to avoid cleaning zero lamport accounts
+ db.set_latest_full_snapshot_slot(0);
let zero_lamport_account_key = Pubkey::new_unique();
let other_account_key = Pubkey::new_unique();
@@ -3289,24 +3421,24 @@ fn test_flush_cache_dont_clean_zero_lamport_account() {
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store into slot 0, and then flush the slot to storage
- db.store_cached((0, &[(&zero_lamport_account_key, &slot0_account)][..]));
+ db.store_for_tests((0, &[(&zero_lamport_account_key, &slot0_account)][..]));
// Second key keeps other lamport account entry for slot 0 alive,
// preventing clean of the zero_lamport_account in slot 1.
- db.store_cached((0, &[(&other_account_key, &slot0_account)][..]));
+ db.store_for_tests((0, &[(&other_account_key, &slot0_account)][..]));
db.add_root(0);
db.flush_accounts_cache(true, None);
assert!(db.storage.get_slot_storage_entry(0).is_some());
// Store into slot 1, a dummy slot that will be dead and purged before flush
- db.store_cached((1, &[(&zero_lamport_account_key, &zero_lamport_account)][..]));
+ db.store_for_tests((1, &[(&zero_lamport_account_key, &zero_lamport_account)][..]));
// Store into slot 2, which makes all updates from slot 1 outdated.
// This means slot 1 is a dead slot. Later, slot 1 will be cleaned/purged
- // before it even reaches storage, but this purge of slot 1should not affect
+ // before it even reaches storage, but this purge of slot 1 should not affect
// the refcount of `zero_lamport_account_key` because cached keys do not bump
// the refcount in the index. This means clean should *not* remove
// `zero_lamport_account_key` from slot 2
- db.store_cached((2, &[(&zero_lamport_account_key, &zero_lamport_account)][..]));
+ db.store_for_tests((2, &[(&zero_lamport_account_key, &zero_lamport_account)][..]));
db.add_root(1);
db.add_root(2);
@@ -3315,20 +3447,20 @@ fn test_flush_cache_dont_clean_zero_lamport_account() {
db.flush_accounts_cache(true, None);
db.clean_accounts_for_tests();
- // The `zero_lamport_account_key` is still alive in slot 1, so refcount for the
+ // The `zero_lamport_account_key` is still alive in slot 0, so refcount for the
// pubkey should be 2
- assert_eq!(
- db.accounts_index
- .ref_count_from_storage(&zero_lamport_account_key),
- 2
- );
- assert_eq!(
- db.accounts_index.ref_count_from_storage(&other_account_key),
- 1
- );
+ if mark_obsolete_accounts == MarkObsoleteAccounts::Disabled {
+ db.assert_ref_count(&zero_lamport_account_key, 2);
+ } else {
+ // However, if obsolete accounts are enabled, it will only be alive in slot 2
+ db.assert_ref_count(&zero_lamport_account_key, 1);
+ }
+ db.assert_ref_count(&other_account_key, 1);
// The zero-lamport account in slot 2 should not be purged yet, because the
- // entry in slot 1 is blocking cleanup of the zero-lamport account.
+ // entry in slot 0 is blocking cleanup of the zero-lamport account.
+ // With obsolete accounts enabled, the zero lamport account being newer
+ // than the latest full snapshot blocks cleanup
let max_root = None;
// Fine to simulate a transaction load since we are not doing any out of band
// removals, only using clean_accounts
@@ -3358,7 +3490,7 @@ fn test_flush_cache_populates_uncleaned_pubkeys() {
let account = AccountSharedData::new(10, 0, &Pubkey::default());
// storing accounts doesn't add anything to uncleaned_pubkeys
- accounts_db.store_cached((slot, [(pubkey, account)].as_slice()));
+ accounts_db.store_for_tests((slot, [(pubkey, account)].as_slice()));
assert_eq!(accounts_db.get_len_of_slots_with_uncleaned_pubkeys(), 0);
// ...but ensure that rooting and flushing the write cache does
@@ -3443,11 +3575,11 @@ fn test_scan_flush_accounts_cache_then_clean_drop() {
/ \
1 2 (root)
*/
- db.store_cached((0, &[(&account_key, &zero_lamport_account)][..]));
- db.store_cached((1, &[(&account_key, &slot1_account)][..]));
+ db.store_for_tests((0, &[(&account_key, &zero_lamport_account)][..]));
+ db.store_for_tests((1, &[(&account_key, &slot1_account)][..]));
// Fodder for the scan so that the lock on `account_key` is not held
- db.store_cached((1, &[(&account_key2, &slot1_account)][..]));
- db.store_cached((2, &[(&account_key, &slot2_account)][..]));
+ db.store_for_tests((1, &[(&account_key2, &slot1_account)][..]));
+ db.store_for_tests((2, &[(&account_key, &slot2_account)][..]));
let max_scan_root = 0;
db.add_root(max_scan_root);
@@ -3541,7 +3673,7 @@ define_accounts_db_test!(test_alive_bytes, |accounts_db| {
for data_size in 0..num_keys {
let account = AccountSharedData::new(1, data_size, &Pubkey::default());
- accounts_db.store_cached((slot, &[(&Pubkey::new_unique(), &account)][..]));
+ accounts_db.store_for_tests((slot, &[(&Pubkey::new_unique(), &account)][..]));
}
accounts_db.add_root(slot);
@@ -3600,7 +3732,7 @@ define_accounts_db_test!(
let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let key = Pubkey::new_unique();
- accounts_db.store_cached((slot, &[(&key, &zero_account)][..]));
+ accounts_db.store_for_tests((slot, &[(&key, &zero_account)][..]));
pubkeys.push(key);
}
@@ -3668,7 +3800,7 @@ fn setup_accounts_db_cache_clean(
.take(num_slots)
.collect();
if scan_slot.is_some() {
- accounts_db.store_cached(
+ accounts_db.store_for_tests(
// Store it in a slot that isn't returned in `slots`
(
stall_slot,
@@ -3685,7 +3817,7 @@ fn setup_accounts_db_cache_clean(
for slot in &slots {
for key in &keys[*slot as usize..] {
let space = 1; // 1 byte allows us to track by size
- accounts_db.store_cached((
+ accounts_db.store_for_tests((
*slot,
&[(key, &AccountSharedData::new(1, space, &Pubkey::default()))][..],
));
@@ -3728,7 +3860,7 @@ fn test_accounts_db_cache_clean_dead_slots() {
slots.push(alive_slot);
for key in &keys {
// Store a slot that overwrites all previous keys, rendering all previous keys dead
- accounts_db.store_cached((
+ accounts_db.store_for_tests((
alive_slot,
&[(key, &AccountSharedData::new(1, 0, &Pubkey::default()))][..],
));
@@ -4027,15 +4159,15 @@ fn test_shrink_unref() {
let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
// Store into slot 0
- db.store_for_tests(0, &[(&account_key1, &account1)]);
- db.store_for_tests(0, &[(&account_key2, &account1)]);
+ db.store_for_tests((0, [(&account_key1, &account1)].as_slice()));
+ db.store_for_tests((0, [(&account_key2, &account1)].as_slice()));
db.add_root(0);
// Make account_key1 in slot 0 outdated by updating in rooted slot 1
- db.store_cached((1, &[(&account_key1, &account1)][..]));
+ db.store_for_tests((1, &[(&account_key1, &account1)][..]));
db.add_root(1);
- // Flushes all roots
- db.flush_accounts_cache(true, None);
+ // Flush without cleaning to avoid reclaiming account_key1 early
+ db.flush_rooted_accounts_cache(None, false);
// Clean to remove outdated entry from slot 0
db.clean_accounts(Some(1), false, &EpochSchedule::default());
@@ -4048,11 +4180,11 @@ fn test_shrink_unref() {
db.shrink_candidate_slots(&epoch_schedule);
// Make slot 0 dead by updating the remaining key
- db.store_cached((2, &[(&account_key2, &account1)][..]));
+ db.store_for_tests((2, &[(&account_key2, &account1)][..]));
db.add_root(2);
- // Flushes all roots
- db.flush_accounts_cache(true, None);
+ // Flush without cleaning to avoid reclaiming account_key2 early
+ db.flush_rooted_accounts_cache(None, false);
// Should be one store before clean for slot 0
db.get_and_assert_single_storage(0);
@@ -4064,7 +4196,7 @@ fn test_shrink_unref() {
// Ref count for `account_key1` (account removed earlier by shrink)
// should be 1, since it was only stored in slot 0 and 1, and slot 0
// is now dead
- assert_eq!(db.accounts_index.ref_count_from_storage(&account_key1), 1);
+ db.assert_ref_count(&account_key1, 1);
}
#[test]
@@ -4078,12 +4210,12 @@ fn test_clean_drop_dead_zero_lamport_single_ref_accounts() {
// slot 0 - stored a 1-lamport account
let slot = 0;
- accounts_db.store_cached((slot, &[(&key1, &one_account)][..]));
+ accounts_db.store_for_tests((slot, &[(&key1, &one_account)][..]));
accounts_db.add_root(slot);
// slot 1 - store a 0 -lamport account
let slot = 1;
- accounts_db.store_cached((slot, &[(&key1, &zero_account)][..]));
+ accounts_db.store_for_tests((slot, &[(&key1, &zero_account)][..]));
accounts_db.add_root(slot);
accounts_db.flush_accounts_cache(true, None);
@@ -4106,13 +4238,13 @@ fn test_clean_drop_dead_storage_handle_zero_lamport_single_ref_accounts() {
let account0 = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store into slot 0
- db.store_for_tests(0, &[(&account_key1, &account1)]);
+ db.store_for_tests((0, [(&account_key1, &account1)].as_slice()));
db.add_root_and_flush_write_cache(0);
// Make account_key1 in slot 0 outdated by updating in rooted slot 1 with a zero lamport account
// And store one additional live account to make the store still alive after clean.
- db.store_cached((1, &[(&account_key1, &account0)][..]));
- db.store_cached((1, &[(&account_key2, &account1)][..]));
+ db.store_for_tests((1, &[(&account_key1, &account0)][..]));
+ db.store_for_tests((1, &[(&account_key2, &account1)][..]));
db.add_root(1);
// Flushes all roots
db.flush_accounts_cache(true, None);
@@ -4128,7 +4260,7 @@ fn test_clean_drop_dead_storage_handle_zero_lamport_single_ref_accounts() {
// has one other alive account, it is not completely dead. So it won't
// be a candidate for "clean" to drop. Instead, it becomes a candidate
// for next round shrinking.
- assert_eq!(db.accounts_index.ref_count_from_storage(&account_key1), 1);
+ db.assert_ref_count(&account_key1, 1);
assert_eq!(
db.get_and_assert_single_storage(1)
.num_zero_lamport_single_ref_accounts(),
@@ -4137,6 +4269,9 @@ fn test_clean_drop_dead_storage_handle_zero_lamport_single_ref_accounts() {
assert!(db.shrink_candidate_slots.lock().unwrap().contains(&1));
}
+/// Tests that shrink correctly marks newly single ref zero lamport accounts and sends them to clean
+/// This test is still relevant with obsolete accounts enabled, but can be removed if all
+/// scenarios where flush_write_cache doesn't clean are eliminated.
#[test]
fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() {
let db = AccountsDb::new_single_for_tests();
@@ -4147,15 +4282,15 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() {
let account0 = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store into slot 0
- db.store_for_tests(0, &[(&account_key1, &account1)]);
- db.store_for_tests(0, &[(&account_key2, &account1)]);
+ db.store_for_tests((0, [(&account_key1, &account1)].as_slice()));
+ db.store_for_tests((0, [(&account_key2, &account1)].as_slice()));
db.add_root_and_flush_write_cache(0);
// Make account_key1 in slot 0 outdated by updating in rooted slot 1 with a zero lamport account
- db.store_cached((1, &[(&account_key1, &account0)][..]));
+ db.store_for_tests((1, &[(&account_key1, &account0)][..]));
db.add_root(1);
- // Flushes all roots
- db.flush_accounts_cache(true, None);
+ // Flushes all roots without clean
+ db.flush_rooted_accounts_cache(None, false);
// Clean to remove outdated entry from slot 0
db.clean_accounts(Some(1), false, &EpochSchedule::default());
@@ -4169,7 +4304,7 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() {
// After shrink slot 0, check that the zero_lamport account on slot 1
// should be marked since it become singe_ref.
- assert_eq!(db.accounts_index.ref_count_from_storage(&account_key1), 1);
+ db.assert_ref_count(&account_key1, 1);
assert_eq!(
db.get_and_assert_single_storage(1)
.num_zero_lamport_single_ref_accounts(),
@@ -4182,23 +4317,24 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() {
assert!(!db.shrink_candidate_slots.lock().unwrap().contains(&1));
// Now, make slot 0 dead by updating the remaining key
- db.store_cached((2, &[(&account_key2, &account1)][..]));
+ db.store_for_tests((2, &[(&account_key2, &account1)][..]));
db.add_root(2);
// Flushes all roots
db.flush_accounts_cache(true, None);
- // Should be one store before clean for slot 0 and slot 1
- db.get_and_assert_single_storage(0);
+ // Should be one store before clean for slot 1
db.get_and_assert_single_storage(1);
db.clean_accounts(Some(2), false, &EpochSchedule::default());
- // No stores should exist for slot 0 after clean
+ // No stores should exist for slot 0. If obsolete accounts are enabled, slot 0 stores are
+ // cleaned when slot 2 is flushed. If obsolete accounts are disabled, slot 0 stores are
+ // cleaned during the clean_accounts function call.
assert_no_storages_at_slot(&db, 0);
// No store should exit for slot 1 too as it has only a zero lamport single ref account.
assert_no_storages_at_slot(&db, 1);
// Store 2 should have a single account.
- assert_eq!(db.accounts_index.ref_count_from_storage(&account_key2), 1);
+ db.assert_ref_count(&account_key2, 1);
db.get_and_assert_single_storage(2);
}
@@ -4211,8 +4347,11 @@ define_accounts_db_test!(test_partial_clean, |db| {
let account4 = AccountSharedData::new(4, 0, AccountSharedData::default().owner());
// Store accounts into slots 0 and 1
- db.store_for_tests(0, &[(&account_key1, &account1), (&account_key2, &account1)]);
- db.store_for_tests(1, &[(&account_key1, &account2)]);
+ db.store_for_tests((
+ 0,
+ [(&account_key1, &account1), (&account_key2, &account1)].as_slice(),
+ ));
+ db.store_for_tests((1, [(&account_key1, &account2)].as_slice()));
db.print_accounts_stats("pre-clean1");
@@ -4233,7 +4372,10 @@ define_accounts_db_test!(test_partial_clean, |db| {
db.add_root_and_flush_write_cache(0);
// store into slot 2
- db.store_for_tests(2, &[(&account_key2, &account3), (&account_key1, &account3)]);
+ db.store_for_tests((
+ 2,
+ [(&account_key2, &account3), (&account_key1, &account3)].as_slice(),
+ ));
db.clean_accounts_for_tests();
db.print_accounts_stats("post-clean2");
@@ -4243,7 +4385,7 @@ define_accounts_db_test!(test_partial_clean, |db| {
db.print_accounts_stats("post-clean3");
- db.store_for_tests(3, &[(&account_key2, &account4)]);
+ db.store_for_tests((3, [(&account_key2, &account4)].as_slice()));
db.add_root_and_flush_write_cache(3);
// Check that we can clean where max_root=3 and slot=2 is not rooted
@@ -4282,7 +4424,7 @@ fn start_load_thread(
return;
}
// Meddle load_limit to cover all branches of implementation.
- // There should absolutely no behaviorial difference; the load_limit triggered
+ // There should be absolutely no behavioral difference; the load_limit triggered
// slow branch should only affect the performance.
// Ordering::Relaxed is ok because of no data dependencies; the modified field is
// completely free-standing cfg(test) control-flow knob.
@@ -4318,7 +4460,7 @@ fn test_load_account_and_cache_flush_race() {
let db = Arc::new(db);
let pubkey = Arc::new(Pubkey::new_unique());
let exit = Arc::new(AtomicBool::new(false));
- db.store_cached((
+ db.store_for_tests((
0,
&[(
pubkey.as_ref(),
@@ -4342,7 +4484,7 @@ fn test_load_account_and_cache_flush_race() {
return;
}
account.set_lamports(slot + 1);
- db.store_cached((slot, &[(pubkey.as_ref(), &account)][..]));
+ db.store_for_tests((slot, &[(pubkey.as_ref(), &account)][..]));
db.add_root(slot);
sleep(Duration::from_millis(RACY_SLEEP_MS));
db.flush_accounts_cache(true, None);
@@ -4380,7 +4522,7 @@ fn do_test_load_account_and_shrink_race(with_retry: bool) {
let lamports = 42;
let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
account.set_lamports(lamports);
- db.store_for_tests(slot, &[(&pubkey, &account)]);
+ db.store_for_tests((slot, [(pubkey.as_ref(), &account)].as_slice()));
// Set the slot as a root so account loads will see the contents of this slot
db.add_root(slot);
@@ -4478,7 +4620,7 @@ fn test_cache_flush_delayed_remove_unrooted_race() {
let num_trials = 10;
for _ in 0..num_trials {
let pubkey = Pubkey::new_unique();
- db.store_cached((slot, &[(&pubkey, &account)][..]));
+ db.store_for_tests((slot, &[(&pubkey, &account)][..]));
// Wait for both threads to finish
flush_trial_start_sender.send(()).unwrap();
remove_trial_start_sender.send(()).unwrap();
@@ -4562,7 +4704,7 @@ fn test_cache_flush_remove_unrooted_race_multiple_slots() {
let slot_to_pubkey_map: HashMap = (0..num_cached_slots)
.map(|slot| {
let pubkey = Pubkey::new_unique();
- db.store_cached((slot, &[(&pubkey, &account)][..]));
+ db.store_for_tests((slot, &[(&pubkey, &account)][..]));
(slot, pubkey)
})
.collect();
@@ -4667,9 +4809,9 @@ fn test_remove_uncleaned_slots_and_collect_pubkeys_up_to_slot() {
let account2 = AccountSharedData::new(0, 0, &pubkey2);
let account3 = AccountSharedData::new(0, 0, &pubkey3);
- db.store_for_tests(slot1, &[(&pubkey1, &account1)]);
- db.store_for_tests(slot2, &[(&pubkey2, &account2)]);
- db.store_for_tests(slot3, &[(&pubkey3, &account3)]);
+ db.store_for_tests((slot1, [(&pubkey1, &account1)].as_slice()));
+ db.store_for_tests((slot2, [(&pubkey2, &account2)].as_slice()));
+ db.store_for_tests((slot3, [(&pubkey3, &account3)].as_slice()));
// slot 1 is _not_ a root on purpose
db.add_root(slot2);
@@ -4696,8 +4838,9 @@ fn test_remove_uncleaned_slots_and_collect_pubkeys_up_to_slot() {
assert!(candidates_contain(&pubkey3));
}
-#[test]
-fn test_shrink_productive() {
+#[test_case(StorageAccess::Mmap)]
+#[test_case(StorageAccess::File)]
+fn test_shrink_productive(storage_access: StorageAccess) {
solana_logger::setup();
let path = Path::new("");
let file_size = 100;
@@ -4709,6 +4852,7 @@ fn test_shrink_productive() {
slot as AccountsFileId,
file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
store.add_account(file_size as usize);
assert!(!AccountsDb::is_shrinking_productive(&store));
@@ -4719,6 +4863,7 @@ fn test_shrink_productive() {
slot as AccountsFileId,
file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
store.add_account(file_size as usize / 2);
store.add_account(file_size as usize / 4);
@@ -4729,8 +4874,9 @@ fn test_shrink_productive() {
assert!(!AccountsDb::is_shrinking_productive(&store));
}
-#[test]
-fn test_is_candidate_for_shrink() {
+#[test_case(StorageAccess::Mmap)]
+#[test_case(StorageAccess::File)]
+fn test_is_candidate_for_shrink(storage_access: StorageAccess) {
solana_logger::setup();
let mut accounts = AccountsDb::new_single_for_tests();
@@ -4742,6 +4888,7 @@ fn test_is_candidate_for_shrink() {
1,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
match accounts.shrink_ratio {
AccountShrinkThreshold::TotalSpace { shrink_ratio } => {
@@ -4790,7 +4937,8 @@ define_accounts_db_test!(test_calculate_storage_count_and_alive_bytes, |accounts
let storage = accounts.storage.get_slot_storage_entry(slot0).unwrap();
let storage_info = StorageSizeAndCountMap::default();
- accounts.generate_index_for_slot(&storage, slot0, 0, &storage_info);
+ let mut reader = append_vec::new_scan_accounts_reader();
+ accounts.generate_index_for_slot(&mut reader, &storage, slot0, 0, &storage_info);
assert_eq!(storage_info.len(), 1);
for entry in storage_info.iter() {
let expected_stored_size =
@@ -4813,7 +4961,8 @@ define_accounts_db_test!(
// empty store
let storage = accounts.create_and_insert_store(0, 1, "test");
let storage_info = StorageSizeAndCountMap::default();
- accounts.generate_index_for_slot(&storage, 0, 0, &storage_info);
+ let mut reader = append_vec::new_scan_accounts_reader();
+ accounts.generate_index_for_slot(&mut reader, &storage, 0, 0, &storage_info);
assert!(storage_info.is_empty());
}
);
@@ -4849,7 +4998,8 @@ define_accounts_db_test!(
);
let storage_info = StorageSizeAndCountMap::default();
- accounts.generate_index_for_slot(&storage, 0, 0, &storage_info);
+ let mut reader = append_vec::new_scan_accounts_reader();
+ accounts.generate_index_for_slot(&mut reader, &storage, 0, 0, &storage_info);
assert_eq!(storage_info.len(), 1);
for entry in storage_info.iter() {
let expected_stored_size =
@@ -4872,14 +5022,13 @@ define_accounts_db_test!(test_set_storage_count_and_alive_bytes, |accounts| {
let shared_key = solana_pubkey::new_rand();
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let slot0 = 0;
- accounts.store_for_tests(slot0, &[(&shared_key, &account)]);
+ accounts.store_for_tests((slot0, [(&shared_key, &account)].as_slice()));
accounts.add_root_and_flush_write_cache(slot0);
// fake out the store count to avoid the assert
for (_, store) in accounts.storage.iter() {
store.alive_bytes.store(0, Ordering::Release);
- let mut count_and_status = store.count_and_status.lock_write();
- count_and_status.0 = 0;
+ store.count.store(0, Ordering::Release);
}
// count needs to be <= approx stored count in store.
@@ -4897,14 +5046,14 @@ define_accounts_db_test!(test_set_storage_count_and_alive_bytes, |accounts| {
);
for (_, store) in accounts.storage.iter() {
- assert_eq!(store.count_and_status.read().0, 0);
+ assert_eq!(store.count(), 0);
assert_eq!(store.alive_bytes(), 0);
}
accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default());
assert_eq!(accounts.storage.len(), 1);
for (_, store) in accounts.storage.iter() {
assert_eq!(store.id(), 0);
- assert_eq!(store.count_and_status.read().0, count);
+ assert_eq!(store.count(), count);
assert_eq!(store.alive_bytes(), 2);
}
});
@@ -4919,15 +5068,15 @@ define_accounts_db_test!(test_purge_alive_unrooted_slots_after_clean, |accounts|
// Store accounts with greater than 0 lamports
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
- accounts.store_for_tests(slot0, &[(&shared_key, &account)]);
- accounts.store_for_tests(slot0, &[(&unrooted_key, &account)]);
+ accounts.store_for_tests((slot0, [(&shared_key, &account)].as_slice()));
+ accounts.store_for_tests((slot0, [(&unrooted_key, &account)].as_slice()));
// Simulate adding dirty pubkeys on bank freeze. Note this is
// not a rooted slot
// On the next *rooted* slot, update the `shared_key` account to zero lamports
let zero_lamport_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
- accounts.store_for_tests(slot1, &[(&shared_key, &zero_lamport_account)]);
+ accounts.store_for_tests((slot1, [(&shared_key, &zero_lamport_account)].as_slice()));
// Simulate adding dirty pubkeys on bank freeze, set root
accounts.add_root_and_flush_write_cache(slot1);
@@ -4974,32 +5123,32 @@ define_accounts_db_test!(
let slot1: Slot = 1;
let account = AccountSharedData::new(111, space, &owner);
- accounts_db.store_cached((slot1, &[(&pubkey, &account)][..]));
+ accounts_db.store_for_tests((slot1, &[(&pubkey, &account)][..]));
accounts_db.add_root_and_flush_write_cache(slot1);
let slot2: Slot = 2;
let account = AccountSharedData::new(222, space, &owner);
- accounts_db.store_cached((slot2, &[(&pubkey, &account)][..]));
+ accounts_db.store_for_tests((slot2, &[(&pubkey, &account)][..]));
accounts_db.add_root_and_flush_write_cache(slot2);
let slot3: Slot = 3;
let account = AccountSharedData::new(0, space, &owner);
- accounts_db.store_cached((slot3, &[(&pubkey, &account)][..]));
+ accounts_db.store_for_tests((slot3, &[(&pubkey, &account)][..]));
accounts_db.add_root_and_flush_write_cache(slot3);
- assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 3);
+ accounts_db.assert_ref_count(&pubkey, 3);
accounts_db.set_latest_full_snapshot_slot(slot2);
accounts_db.clean_accounts(Some(slot2), false, &EpochSchedule::default());
- assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 2);
+ accounts_db.assert_ref_count(&pubkey, 2);
accounts_db.set_latest_full_snapshot_slot(slot2);
accounts_db.clean_accounts(None, false, &EpochSchedule::default());
- assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 1);
+ accounts_db.assert_ref_count(&pubkey, 1);
accounts_db.set_latest_full_snapshot_slot(slot3);
accounts_db.clean_accounts(None, false, &EpochSchedule::default());
- assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 0);
+ accounts_db.assert_ref_count(&pubkey, 0);
}
);
@@ -5026,7 +5175,7 @@ fn test_filter_zero_lamport_clean_for_incremental_snapshots() {
candidates[0].insert(
pubkey,
CleaningInfo {
- slot_list: vec![(slot, account_info)],
+ slot_list: SlotList::from([(slot, account_info)]),
ref_count: 1,
..Default::default()
},
@@ -5176,7 +5325,7 @@ fn test_unref_pubkeys_removed_from_accounts_index() {
let db = AccountsDb::new_single_for_tests();
let mut purged_slot_pubkeys = HashSet::default();
purged_slot_pubkeys.insert((slot1, pk1));
- let mut reclaims = SlotList::default();
+ let mut reclaims = ReclaimsSlotList::default();
db.accounts_index.upsert(
slot1,
slot1,
@@ -5199,8 +5348,8 @@ fn test_unref_pubkeys_removed_from_accounts_index() {
vec![(pk1, vec![slot1].into_iter().collect::>())],
purged_stored_account_slots.into_iter().collect::>()
);
- let expected = u64::from(already_removed);
- assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), expected);
+ let expected = RefCount::from(already_removed);
+ db.assert_ref_count(&pk1, expected);
}
}
@@ -5230,7 +5379,7 @@ fn test_unref_accounts() {
let db = AccountsDb::new_single_for_tests();
let mut purged_slot_pubkeys = HashSet::default();
purged_slot_pubkeys.insert((slot1, pk1));
- let mut reclaims = SlotList::default();
+ let mut reclaims = ReclaimsSlotList::default();
db.accounts_index.upsert(
slot1,
slot1,
@@ -5253,13 +5402,13 @@ fn test_unref_accounts() {
vec![(pk1, vec![slot1].into_iter().collect::>())],
purged_stored_account_slots.into_iter().collect::>()
);
- assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), 0);
+ db.assert_ref_count(&pk1, 0);
}
{
let db = AccountsDb::new_single_for_tests();
let mut purged_stored_account_slots = AccountSlots::default();
let mut purged_slot_pubkeys = HashSet::default();
- let mut reclaims = SlotList::default();
+ let mut reclaims = ReclaimsSlotList::default();
// pk1 and pk2 both in slot1 and slot2, so each has refcount of 2
for slot in [slot1, slot2] {
for pk in [pk1, pk2] {
@@ -5291,15 +5440,15 @@ fn test_unref_accounts() {
assert_eq!(result, slots.into_iter().collect::>());
}
assert!(purged_stored_account_slots.is_empty());
- assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), 0);
- assert_eq!(db.accounts_index.ref_count_from_storage(&pk2), 1);
+ db.assert_ref_count(&pk1, 0);
+ db.assert_ref_count(&pk2, 1);
}
}
}
define_accounts_db_test!(test_many_unrefs, |db| {
let mut purged_stored_account_slots = AccountSlots::default();
- let mut reclaims = SlotList::default();
+ let mut reclaims = ReclaimsSlotList::default();
let pk1 = Pubkey::from([1; 32]);
// make sure we have > 1 batch. Bigger numbers cost more in test time here.
let n = (UNREF_ACCOUNTS_BATCH_SIZE + 1) as Slot;
@@ -5320,7 +5469,10 @@ define_accounts_db_test!(test_many_unrefs, |db| {
})
.collect::>();
- assert_eq!(db.accounts_index.ref_count_from_storage(&pk1), n);
+ assert_eq!(
+ db.accounts_index.ref_count_from_storage(&pk1),
+ n as RefCount,
+ );
// unref all 'n' slots
db.unref_accounts(
purged_slot_pubkeys,
@@ -5411,10 +5563,10 @@ fn test_sweep_get_oldest_non_ancient_slot_max() {
] {
let db = AccountsDb::new_with_config(
Vec::new(),
- Some(AccountsDbConfig {
+ AccountsDbConfig {
ancient_append_vec_offset: Some(ancient_append_vec_offset as i64),
..ACCOUNTS_DB_CONFIG_FOR_TESTING
- }),
+ },
None,
Arc::default(),
);
@@ -5442,10 +5594,10 @@ fn test_sweep_get_oldest_non_ancient_slot() {
let ancient_append_vec_offset = 50_000;
let db = AccountsDb::new_with_config(
Vec::new(),
- Some(AccountsDbConfig {
+ AccountsDbConfig {
ancient_append_vec_offset: Some(ancient_append_vec_offset),
..ACCOUNTS_DB_CONFIG_FOR_TESTING
- }),
+ },
None,
Arc::default(),
);
@@ -5494,10 +5646,10 @@ fn test_sweep_get_oldest_non_ancient_slot2() {
for starting_slot_offset in [0, avoid_saturation] {
let db = AccountsDb::new_with_config(
Vec::new(),
- Some(AccountsDbConfig {
+ AccountsDbConfig {
ancient_append_vec_offset: Some(ancient_append_vec_offset),
..ACCOUNTS_DB_CONFIG_FOR_TESTING
- }),
+ },
None,
Arc::default(),
);
@@ -5653,7 +5805,7 @@ fn test_shrink_collect_simple() {
account.set_lamports(u64::from(old_lamports == 0));
}
- db.store_for_tests(slot5, &[(pubkey, &account)]);
+ db.store_for_tests((slot5, [(pubkey, &account)].as_slice()));
account.set_lamports(old_lamports);
let mut alive = alive;
if append_opposite_alive_account
@@ -5671,8 +5823,8 @@ fn test_shrink_collect_simple() {
to_purge.iter().for_each(|pubkey| {
db.accounts_index.purge_exact(
pubkey,
- &([slot5].into_iter().collect::>()),
- &mut Vec::default(),
+ [slot5].into_iter().collect::>(),
+ &mut ReclaimsSlotList::new(),
);
});
@@ -5821,7 +5973,7 @@ fn test_shrink_collect_with_obsolete_accounts() {
account.set_lamports(200);
regular_pubkeys.push(*pubkey);
}
- db.store_for_tests(slot, &[(pubkey, &account)]);
+ db.store_for_tests((slot, [(pubkey, &account)].as_slice()));
}
// Flush the cache
@@ -5847,8 +5999,8 @@ fn test_shrink_collect_with_obsolete_accounts() {
// Purge accounts via clean and ensure that they will be unreffed.
db.accounts_index.purge_exact(
pubkey,
- &([slot].into_iter().collect::>()),
- &mut Vec::default(),
+ [slot].into_iter().collect::>(),
+ &mut ReclaimsSlotList::new(),
);
unref_pubkeys.push(*pubkey);
}
@@ -5907,12 +6059,13 @@ fn test_combine_ancient_slots_simple() {
fn get_all_accounts_from_storages<'a>(
storages: impl Iterator
- >,
) -> Vec<(Pubkey, AccountSharedData)> {
+ let mut reader = append_vec::new_scan_accounts_reader();
storages
.flat_map(|storage| {
let mut vec = Vec::default();
storage
.accounts
- .scan_accounts(|_offset, account| {
+ .scan_accounts(&mut reader, |_offset, account| {
vec.push((*account.pubkey(), account.to_account_shared_data()));
})
.expect("must scan accounts storage");
@@ -5985,147 +6138,6 @@ pub(crate) fn compare_all_accounts(
);
}
-#[test]
-fn test_shrink_ancient_overflow_with_min_size() {
- solana_logger::setup();
-
- let ideal_av_size = ancient_append_vecs::get_ancient_append_vec_capacity();
- let num_normal_slots = 2;
-
- // build an ancient append vec at slot 'ancient_slot' with one `fat`
- // account that's larger than the ideal size of ancient append vec to
- // simulate the *oversized* append vec for shrinking.
- let account_size = (1.5 * ideal_av_size as f64) as u64;
- let (db, ancient_slot) = get_one_ancient_append_vec_and_others_with_account_size(
- num_normal_slots,
- Some(account_size),
- );
-
- let max_slot_inclusive = ancient_slot + (num_normal_slots as Slot);
- let initial_accounts = get_all_accounts(&db, ancient_slot..(max_slot_inclusive + 1));
-
- let ancient = db.storage.get_slot_storage_entry(ancient_slot).unwrap();
-
- // assert that the min_size, which about 1.5 * ideal_av_size, kicked in
- // and result that the ancient append vec capacity exceeds the ideal_av_size
- assert!(ancient.capacity() > ideal_av_size);
-
- // combine 1 normal append vec into existing oversize ancient append vec.
- db.combine_ancient_slots_packed(
- (ancient_slot..max_slot_inclusive).collect(),
- CAN_RANDOMLY_SHRINK_FALSE,
- );
-
- compare_all_accounts(
- &initial_accounts,
- &get_all_accounts(&db, ancient_slot..max_slot_inclusive),
- );
-
- // the append vec at max_slot_inclusive-1 should NOT have been removed
- // since the append vec is already oversized and we created an ancient
- // append vec there.
- let ancient2 = db
- .storage
- .get_slot_storage_entry(max_slot_inclusive - 1)
- .unwrap();
- assert!(ancient2.capacity() > ideal_av_size); // min_size kicked in, which cause the appendvec to be larger than the ideal_av_size
-
- // Combine normal append vec(s) into existing ancient append vec this
- // will overflow the original ancient append vec because of the oversized
- // ancient append vec is full.
- db.combine_ancient_slots_packed(
- (ancient_slot..=max_slot_inclusive).collect(),
- CAN_RANDOMLY_SHRINK_FALSE,
- );
-
- compare_all_accounts(
- &initial_accounts,
- &get_all_accounts(&db, ancient_slot..(max_slot_inclusive + 1)),
- );
-
- // Nothing should be combined because the append vec are oversized.
- // min_size kicked in, which cause the appendvecs to be larger than the ideal_av_size.
- let ancient = db.storage.get_slot_storage_entry(ancient_slot).unwrap();
- assert!(ancient.capacity() > ideal_av_size);
-
- let ancient2 = db
- .storage
- .get_slot_storage_entry(max_slot_inclusive - 1)
- .unwrap();
- assert!(ancient2.capacity() > ideal_av_size);
-
- let ancient3 = db
- .storage
- .get_slot_storage_entry(max_slot_inclusive)
- .unwrap();
- assert!(ancient3.capacity() > ideal_av_size);
-}
-
-#[test]
-fn test_shrink_overflow_too_much() {
- let num_normal_slots = 2;
- let ideal_av_size = ancient_append_vecs::get_ancient_append_vec_capacity();
- let fat_account_size = (1.5 * ideal_av_size as f64) as u64;
-
- // Prepare 3 append vecs to combine [small, big, small]
- let account_data_sizes = vec![100, fat_account_size, 100];
- let (db, slot1) = create_db_with_storages_and_index_with_customized_account_size_per_slot(
- true,
- num_normal_slots + 1,
- account_data_sizes,
- );
- let storage = db.get_storage_for_slot(slot1).unwrap();
- let created_accounts = db.get_unique_accounts_from_storage(&storage);
-
- // Adjust alive_ratio for slot2 to test it is shrinkable and is a
- // candidate for squashing into the previous ancient append vec.
- // However, due to the fact that this append vec is `oversized`, it can't
- // be squashed into the ancient append vec at previous slot (exceeds the
- // size limit). Therefore, a new "oversized" ancient append vec is
- // created at slot2 as the overflow. This is where the "min_bytes" in
- // `fn create_ancient_append_vec` is used.
- let slot2 = slot1 + 1;
- let storage2 = db.storage.get_slot_storage_entry(slot2).unwrap();
- let original_cap_slot2 = storage2.accounts.capacity();
- storage2
- .accounts
- .set_current_len_for_tests(original_cap_slot2 as usize);
-
- // Combine append vec into ancient append vec.
- let slots_to_combine: Vec = (slot1..slot1 + (num_normal_slots + 1) as Slot).collect();
- db.combine_ancient_slots_packed(slots_to_combine, CAN_RANDOMLY_SHRINK_FALSE);
-
- // slot2 is too big to fit into ideal ancient append vec at slot1. So slot2 won't be merged into slot1.
- // slot1 will have its own ancient append vec.
- assert!(db.storage.get_slot_storage_entry(slot1).is_some());
- let ancient = db.get_storage_for_slot(slot1).unwrap();
- assert!(ancient.capacity() <= ideal_av_size);
-
- let after_store = db.get_storage_for_slot(slot1).unwrap();
- let GetUniqueAccountsResult {
- stored_accounts: after_stored_accounts,
- capacity: after_capacity,
- ..
- } = db.get_unique_accounts_from_storage(&after_store);
- assert!(created_accounts.capacity <= after_capacity);
- assert_eq!(created_accounts.stored_accounts.len(), 1);
- assert_eq!(after_stored_accounts.len(), 1);
-
- // slot2, even after shrinking, is still oversized. Therefore, slot 2
- // exists as an ancient append vec.
- let storage2_after = db.storage.get_slot_storage_entry(slot2).unwrap();
- assert!(storage2_after.capacity() > ideal_av_size);
- let after_store = db.get_storage_for_slot(slot2).unwrap();
- let GetUniqueAccountsResult {
- stored_accounts: after_stored_accounts,
- capacity: after_capacity,
- ..
- } = db.get_unique_accounts_from_storage(&after_store);
- assert!(created_accounts.capacity <= after_capacity);
- assert_eq!(created_accounts.stored_accounts.len(), 1);
- assert_eq!(after_stored_accounts.len(), 1);
-}
-
pub fn get_account_from_account_from_storage(
account: &AccountFromStorage,
db: &AccountsDb,
@@ -6158,7 +6170,7 @@ fn populate_index(db: &AccountsDb, slots: Range) {
&account,
&AccountSecondaryIndexes::default(),
info,
- &mut Vec::default(),
+ &mut ReclaimsSlotList::new(),
UpsertReclaim::IgnoreReclaims,
);
})
@@ -6171,53 +6183,6 @@ pub(crate) fn remove_account_for_tests(storage: &AccountStorageEntry, num_bytes:
storage.remove_accounts(num_bytes, 1);
}
-pub(crate) fn create_storages_and_update_index_with_customized_account_size_per_slot(
- db: &AccountsDb,
- tf: Option<&TempFile>,
- starting_slot: Slot,
- num_slots: usize,
- alive: bool,
- account_data_sizes: Vec,
-) {
- if num_slots == 0 {
- return;
- }
- assert!(account_data_sizes.len() == num_slots);
- let local_tf = (tf.is_none()).then(|| {
- crate::append_vec::test_utils::get_append_vec_path("create_storages_and_update_index")
- });
- let tf = tf.unwrap_or_else(|| local_tf.as_ref().unwrap());
-
- let starting_id = db
- .storage
- .iter()
- .map(|storage| storage.1.id())
- .max()
- .unwrap_or(999);
- for (i, account_data_size) in account_data_sizes.iter().enumerate().take(num_slots) {
- let id = starting_id + (i as AccountsFileId);
- let pubkey1 = solana_pubkey::new_rand();
- let storage = sample_storage_with_entries_id_fill_percentage(
- tf,
- starting_slot + (i as Slot),
- &pubkey1,
- id,
- alive,
- Some(*account_data_size),
- 50,
- );
- insert_store(db, Arc::clone(&storage));
- }
-
- let storage = db.get_storage_for_slot(starting_slot).unwrap();
- let created_accounts = db.get_unique_accounts_from_storage(&storage);
- assert_eq!(created_accounts.stored_accounts.len(), 1);
-
- if alive {
- populate_index(db, starting_slot..(starting_slot + (num_slots as Slot) + 1));
- }
-}
-
pub(crate) fn create_storages_and_update_index(
db: &AccountsDb,
tf: Option<&TempFile>,
@@ -6251,6 +6216,7 @@ pub(crate) fn create_storages_and_update_index(
id,
alive,
account_data_size,
+ db.storage_access(),
);
insert_store(db, Arc::clone(&storage));
}
@@ -6285,34 +6251,6 @@ pub(crate) fn create_db_with_storages_and_index(
(db, slot1)
}
-pub(crate) fn create_db_with_storages_and_index_with_customized_account_size_per_slot(
- alive: bool,
- num_slots: usize,
- account_data_size: Vec,
-) -> (AccountsDb, Slot) {
- solana_logger::setup();
-
- let db = AccountsDb::new_single_for_tests();
-
- // create a single append vec with a single account in a slot
- // add the pubkey to index if alive
- // call combine_ancient_slots with the slot
- // verify we create an ancient appendvec that has alive accounts and does not have dead accounts
-
- let slot1 = 1;
- create_storages_and_update_index_with_customized_account_size_per_slot(
- &db,
- None,
- slot1,
- num_slots,
- alive,
- account_data_size,
- );
-
- let slot1 = slot1 as Slot;
- (db, slot1)
-}
-
fn get_one_ancient_append_vec_and_others_with_account_size(
num_normal_slots: usize,
account_data_size: Option,
@@ -6365,9 +6303,10 @@ fn insert_store(db: &AccountsDb, append_vec: Arc) {
db.storage.insert(append_vec.slot(), append_vec);
}
-#[test]
+#[test_case(StorageAccess::Mmap)]
+#[test_case(StorageAccess::File)]
#[should_panic(expected = "self.storage.remove")]
-fn test_handle_dropped_roots_for_ancient_assert() {
+fn test_handle_dropped_roots_for_ancient_assert(storage_access: StorageAccess) {
solana_logger::setup();
let common_store_path = Path::new("");
let store_file_size = 10_000;
@@ -6377,6 +6316,7 @@ fn test_handle_dropped_roots_for_ancient_assert() {
1,
store_file_size,
AccountsFileProvider::AppendVec,
+ storage_access,
));
let db = AccountsDb::new_single_for_tests();
let slot0 = 0;
@@ -6392,7 +6332,17 @@ fn test_handle_dropped_roots_for_ancient_assert() {
/// `clean`. In this case, `clean` should still reclaim the old versions of these accounts.
#[test]
fn test_clean_old_storages_with_reclaims_rooted() {
- let accounts_db = AccountsDb::new_single_for_tests();
+ // This test exercises clean behavior that only applies when obsolete accounts
+ // are disabled, so construct the db with MarkObsoleteAccounts::Disabled.
+ let accounts_db = AccountsDb::new_with_config(
+ Vec::new(),
+ AccountsDbConfig {
+ mark_obsolete_accounts: MarkObsoleteAccounts::Disabled,
+ ..ACCOUNTS_DB_CONFIG_FOR_TESTING
+ },
+ None,
+ Arc::default(),
+ );
let pubkey = Pubkey::new_unique();
let old_slot = 11;
let new_slot = 22;
@@ -6401,10 +6351,10 @@ fn test_clean_old_storages_with_reclaims_rooted() {
let account = AccountSharedData::new(slot, 0, &Pubkey::new_unique());
// store `pubkey` into multiple slots, and also store another unique pubkey
// to prevent the whole storage from being marked as dead by `clean`.
- accounts_db.store_for_tests(
+ accounts_db.store_for_tests((
slot,
- &[(&pubkey, &account), (&Pubkey::new_unique(), &account)],
- );
+ [(&pubkey, &account), (&Pubkey::new_unique(), &account)].as_slice(),
+ ));
accounts_db.add_root_and_flush_write_cache(slot);
accounts_db.uncleaned_pubkeys.remove(&slot);
// ensure this slot is *not* in the dirty_stores nor uncleaned_pubkeys, because we want to
@@ -6461,10 +6411,10 @@ fn test_clean_old_storages_with_reclaims_unrooted() {
let account = AccountSharedData::new(slot, 0, &Pubkey::new_unique());
// store `pubkey` into multiple slots, and also store another unique pubkey
// to prevent the whole storage from being marked as dead by `clean`.
- accounts_db.store_for_tests(
+ accounts_db.store_for_tests((
slot,
- &[(&pubkey, &account), (&Pubkey::new_unique(), &account)],
- );
+ [(&pubkey, &account), (&Pubkey::new_unique(), &account)].as_slice(),
+ ));
}
// only `old_slot` should be rooted, not `new_slot`
@@ -6505,20 +6455,22 @@ fn test_clean_old_storages_with_reclaims_unrooted() {
#[test]
fn test_calculate_capitalization_simple() {
let accounts_db = AccountsDb::new_single_for_tests();
- accounts_db.store_for_tests(
+ accounts_db.store_for_tests((
0,
- &[(
+ [(
&Pubkey::new_unique(),
&AccountSharedData::new(123, 0, &Pubkey::default()),
- )],
- );
- accounts_db.store_for_tests(
+ )]
+ .as_slice(),
+ ));
+ accounts_db.store_for_tests((
1,
- &[(
+ [(
&Pubkey::new_unique(),
&AccountSharedData::new(456, 0, &Pubkey::default()),
- )],
- );
+ )]
+ .as_slice(),
+ ));
assert_eq!(
accounts_db.calculate_capitalization_at_startup_from_index(&Ancestors::from(vec![0, 1]), 1),
123 + 456,
@@ -6532,8 +6484,8 @@ fn test_calculate_capitalization_simple() {
fn test_calculate_capitalization_overflow_intra_slot() {
let accounts_db = AccountsDb::new_single_for_tests();
let account = AccountSharedData::new(u64::MAX - 1, 0, &Pubkey::default());
- accounts_db.store_for_tests(0, &[(&Pubkey::new_unique(), &account)]);
- accounts_db.store_for_tests(0, &[(&Pubkey::new_unique(), &account)]);
+ accounts_db.store_for_tests((0, [(&Pubkey::new_unique(), &account)].as_slice()));
+ accounts_db.store_for_tests((0, [(&Pubkey::new_unique(), &account)].as_slice()));
accounts_db.calculate_capitalization_at_startup_from_index(&Ancestors::from(vec![0]), 0);
}
@@ -6544,8 +6496,8 @@ fn test_calculate_capitalization_overflow_intra_slot() {
fn test_calculate_capitalization_overflow_inter_slot() {
let accounts_db = AccountsDb::new_single_for_tests();
let account = AccountSharedData::new(u64::MAX - 1, 0, &Pubkey::default());
- accounts_db.store_for_tests(0, &[(&Pubkey::new_unique(), &account)]);
- accounts_db.store_for_tests(1, &[(&Pubkey::new_unique(), &account)]);
+ accounts_db.store_for_tests((0, [(&Pubkey::new_unique(), &account)].as_slice()));
+ accounts_db.store_for_tests((1, [(&Pubkey::new_unique(), &account)].as_slice()));
accounts_db.calculate_capitalization_at_startup_from_index(&Ancestors::from(vec![0, 1]), 1);
}
@@ -6576,11 +6528,11 @@ fn test_mark_obsolete_accounts_at_startup_purge_slot() {
// Store the same pubkey in multiple slots
// Store other pubkey in slot0 to ensure slot is not purged
- accounts_db.store_for_tests(0, &[(&pubkey1, &account), (&pubkey2, &account)]);
+ accounts_db.store_for_tests((0, [(&pubkey1, &account), (&pubkey2, &account)].as_slice()));
accounts_db.flush_accounts_cache_slot_for_tests(0);
- accounts_db.store_for_tests(1, &[(&pubkey1, &account)]);
+ accounts_db.store_for_tests((1, [(&pubkey1, &account)].as_slice()));
accounts_db.flush_accounts_cache_slot_for_tests(1);
- accounts_db.store_for_tests(2, &[(&pubkey1, &account)]);
+ accounts_db.store_for_tests((2, [(&pubkey1, &account)].as_slice()));
accounts_db.flush_accounts_cache_slot_for_tests(2);
let pubkeys_with_duplicates_by_bin = vec![vec![pubkey1]];
@@ -6595,10 +6547,7 @@ fn test_mark_obsolete_accounts_at_startup_purge_slot() {
assert!(accounts_db.storage.get_slot_storage_entry(1).is_none());
// Verify that the pubkey ref1's count is 1
- assert_eq!(
- accounts_db.accounts_index.ref_count_from_storage(&pubkey1),
- 1
- );
+ accounts_db.assert_ref_count(&pubkey1, 1);
assert_eq!(obsolete_stats.accounts_marked_obsolete, 2);
}
@@ -6612,7 +6561,10 @@ fn test_mark_obsolete_accounts_at_startup_multiple_bins() {
let account = AccountSharedData::new(100, 0, &Pubkey::default());
for slot in 0..2 {
- accounts_db.store_for_tests(slot, &[(&pubkey1, &account), (&pubkey2, &account)]);
+ accounts_db.store_for_tests((
+ slot,
+ [(&pubkey1, &account), (&pubkey2, &account)].as_slice(),
+ ));
accounts_db.flush_accounts_cache_slot_for_tests(slot);
}
@@ -6628,68 +6580,46 @@ fn test_mark_obsolete_accounts_at_startup_multiple_bins() {
assert!(accounts_db.storage.get_slot_storage_entry(1).is_some());
// Verify that both pubkeys ref_counts are 1
- assert_eq!(
- accounts_db.accounts_index.ref_count_from_storage(&pubkey1),
- 1
- );
- assert_eq!(
- accounts_db.accounts_index.ref_count_from_storage(&pubkey2),
- 1
- );
+ accounts_db.assert_ref_count(&pubkey1, 1);
+ accounts_db.assert_ref_count(&pubkey2, 1);
// Ensure that stats were accumulated correctly
assert_eq!(obsolete_stats.accounts_marked_obsolete, 2);
assert_eq!(obsolete_stats.slots_removed, 1);
}
-// This test verifies that when obsolete accounts are marked, the duplicates lt hash is set to the
-// default value. When they are not marked, it is populated. The second case ensures test validity.
-#[test_case(true; "mark_obsolete_accounts")]
-#[test_case(false; "do_not_mark_obsolete_accounts")]
-fn test_obsolete_accounts_empty_default_duplicate_hash(mark_obsolete_accounts: bool) {
- let slot0 = 0;
- let slot1 = 1;
-
- let db = AccountsDb::new_with_config(
- Vec::new(),
- Some(AccountsDbConfig {
- mark_obsolete_accounts,
- ..ACCOUNTS_DB_CONFIG_FOR_TESTING
- }),
- None,
- Arc::default(),
- );
-
- let pubkey = Pubkey::new_unique();
-
- let storage = db.create_and_insert_store(slot0, 1000, "test");
-
- let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
- let account1 = AccountSharedData::new(100, 0, &Pubkey::default());
-
- storage
- .accounts
- .write_accounts(&(slot0, &[(&pubkey, &account0)][..]), 0);
-
- let storage = db.create_and_insert_store(slot1, 1000, "test");
-
- storage
- .accounts
- .write_accounts(&(slot1, &[(&pubkey, &account1)][..]), 0);
-
- assert!(!db.accounts_index.contains(&pubkey));
- let result = db.generate_index(None, false);
- if mark_obsolete_accounts {
- // If obsolete accounts are marked, the duplicates lt hash should be the default value
- // This is because all duplicates are marked as obsolete and skipped during lt hash calculation.
- assert_eq!(
- *result.duplicates_lt_hash.unwrap(),
- DuplicatesLtHash::default()
- );
- } else {
- assert_ne!(
- *result.duplicates_lt_hash.unwrap(),
- DuplicatesLtHash::default()
- );
- }
+#[test]
+fn test_batch_insert_zero_lamport_single_ref_account_offsets() {
+ let accounts = AccountsDb::new_single_for_tests();
+ let storage = accounts.create_and_insert_store(1, 100, "test");
+
+ // Test inserting new offsets
+ let offsets1 = vec![10, 20, 30];
+ let count1 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets1);
+ assert_eq!(count1, 3, "Should insert all 3 new offsets");
+ assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 3);
+
+ // Test inserting some duplicate and some new offsets
+ let offsets2 = vec![20, 30, 40, 50]; // 20,30 are duplicates, 40,50 are new
+ let count2 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets2);
+ assert_eq!(count2, 2, "Should insert only 2 new offsets (40, 50)");
+ assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 5);
+
+ // Test inserting all duplicates
+ let offsets3 = vec![10, 20];
+ let count3 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets3);
+ assert_eq!(count3, 0, "Should not insert any duplicates");
+ assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 5);
+
+ // Test inserting empty slice
+ let empty_offsets: Vec = vec![];
+ let count4 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&empty_offsets);
+ assert_eq!(count4, 0, "Should handle empty slice");
+ assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 5);
+
+ // Test inserting large batch with mixed duplicates
+ let offsets5 = vec![10, 60, 20, 70, 30, 80, 40]; // 10,20,30,40 duplicates, 60,70,80 new
+ let count5 = storage.batch_insert_zero_lamport_single_ref_account_offsets(&offsets5);
+ assert_eq!(count5, 3, "Should insert only 3 new offsets (60, 70, 80)");
+ assert_eq!(storage.num_zero_lamport_single_ref_accounts(), 8);
}
diff --git a/accounts-db/src/accounts_file.rs b/accounts-db/src/accounts_file.rs
index a6869356bb80cb..e14f9f3a281470 100644
--- a/accounts-db/src/accounts_file.rs
+++ b/accounts-db/src/accounts_file.rs
@@ -1,18 +1,18 @@
#[cfg(feature = "dev-context-only-utils")]
-use crate::append_vec::StoredAccountMeta;
+use crate::append_vec::{self, StoredAccountMeta};
use {
crate::{
account_info::{AccountInfo, Offset},
account_storage::stored_account_info::{StoredAccountInfo, StoredAccountInfoWithoutData},
accounts_db::AccountsFileId,
- accounts_update_notifier_interface::AccountForGeyser,
append_vec::{AppendVec, AppendVecError},
+ buffered_reader::RequiredLenBufFileRead,
storable_accounts::StorableAccounts,
tiered_storage::{
error::TieredStorageError, hot::HOT_FORMAT, index::IndexOffset, TieredStorage,
},
},
- solana_account::{AccountSharedData, ReadableAccount as _},
+ solana_account::AccountSharedData,
solana_clock::Slot,
solana_pubkey::Pubkey,
std::{
@@ -46,14 +46,6 @@ pub enum AccountsFileError {
TieredStorageError(#[from] TieredStorageError),
}
-#[derive(Error, Debug, PartialEq, Eq)]
-pub enum MatchAccountOwnerError {
- #[error("The account owner does not match with the provided list")]
- NoMatch,
- #[error("Unable to load the account")]
- UnableToLoad,
-}
-
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum StorageAccess {
/// storages should be accessed by Mmap
@@ -103,22 +95,10 @@ impl AccountsFile {
Ok(Self::AppendVec(av))
}
- /// true if this storage can possibly be appended to (independent of capacity check)
- //
- // NOTE: Only used by ancient append vecs "append" method, which is test-only now.
- #[cfg(test)]
- pub(crate) fn can_append(&self) -> bool {
- match self {
- Self::AppendVec(av) => av.can_append(),
- // once created, tiered storages cannot be appended to
- Self::TieredStorage(_) => false,
- }
- }
-
/// if storage is not readonly, reopen another instance that is read only
pub(crate) fn reopen_as_readonly(&self) -> Option {
match self {
- Self::AppendVec(av) => av.reopen_as_readonly().map(Self::AppendVec),
+ Self::AppendVec(av) => av.reopen_as_readonly_file_io().map(Self::AppendVec),
Self::TieredStorage(_) => None,
}
}
@@ -132,6 +112,7 @@ impl AccountsFile {
}
}
+ /// Flushes contents to disk
pub fn flush(&self) -> Result<()> {
match self {
Self::AppendVec(av) => av.flush(),
@@ -139,13 +120,6 @@ impl AccountsFile {
}
}
- pub fn reset(&self) {
- match self {
- Self::AppendVec(av) => av.reset(),
- Self::TieredStorage(_) => {}
- }
- }
-
pub fn remaining_bytes(&self) -> u64 {
match self {
Self::AppendVec(av) => av.remaining_bytes(),
@@ -153,6 +127,7 @@ impl AccountsFile {
}
}
+ /// Returns the number of bytes, *not accounts*, used in the AccountsFile
pub fn len(&self) -> usize {
match self {
Self::AppendVec(av) => av.len(),
@@ -167,6 +142,7 @@ impl AccountsFile {
}
}
+ /// Returns the total number of bytes, *not accounts*, the AccountsFile can hold
pub fn capacity(&self) -> u64 {
match self {
Self::AppendVec(av) => av.capacity(),
@@ -262,28 +238,6 @@ impl AccountsFile {
}
}
- pub fn account_matches_owners(
- &self,
- offset: usize,
- owners: &[Pubkey],
- ) -> std::result::Result {
- match self {
- Self::AppendVec(av) => av.account_matches_owners(offset, owners),
- // Note: The conversion here is needed as the AccountsDB currently
- // assumes all offsets are multiple of 8 while TieredStorage uses
- // IndexOffset that is equivalent to AccountInfo::reduced_offset.
- Self::TieredStorage(ts) => {
- let Some(reader) = ts.reader() else {
- return Err(MatchAccountOwnerError::UnableToLoad);
- };
- reader.account_matches_owners(
- IndexOffset(AccountInfo::get_reduced_offset(offset)),
- owners,
- )
- }
- }
- }
-
/// Return the path of the underlying account file.
pub fn path(&self) -> &Path {
match self {
@@ -322,12 +276,13 @@ impl AccountsFile {
///
/// Prefer scan_accounts_without_data() when account data is not needed,
/// as it can potentially read less and be faster.
- pub fn scan_accounts(
- &self,
+ pub(crate) fn scan_accounts<'a>(
+ &'a self,
+ reader: &mut impl RequiredLenBufFileRead<'a>,
callback: impl for<'local> FnMut(Offset, StoredAccountInfo<'local>),
) -> Result<()> {
match self {
- Self::AppendVec(av) => av.scan_accounts(callback),
+ Self::AppendVec(av) => av.scan_accounts(reader, callback),
Self::TieredStorage(ts) => {
if let Some(reader) = ts.reader() {
reader.scan_accounts(callback)?;
@@ -346,38 +301,20 @@ impl AccountsFile {
&self,
callback: impl for<'local> FnMut(StoredAccountMeta<'local>),
) -> Result<()> {
+ let mut reader = append_vec::new_scan_accounts_reader();
match self {
- Self::AppendVec(av) => av.scan_accounts_stored_meta(callback),
+ Self::AppendVec(av) => av.scan_accounts_stored_meta(&mut reader, callback),
Self::TieredStorage(_) => {
unimplemented!("StoredAccountMeta is only implemented for AppendVec")
}
}
}
- /// Iterate over all accounts and call `callback` with each account.
- /// Only intended to be used by Geyser.
- pub fn scan_accounts_for_geyser(
- &self,
- mut callback: impl for<'local> FnMut(AccountForGeyser<'local>),
- ) -> Result<()> {
- self.scan_accounts(|_offset, account| {
- let account_for_geyser = AccountForGeyser {
- pubkey: account.pubkey(),
- lamports: account.lamports(),
- owner: account.owner(),
- executable: account.executable(),
- rent_epoch: account.rent_epoch(),
- data: account.data(),
- };
- callback(account_for_geyser)
- })
- }
-
/// Calculate the amount of storage required for an account with the passed
/// in data_len
pub(crate) fn calculate_stored_size(&self, data_len: usize) -> usize {
match self {
- Self::AppendVec(av) => av.calculate_stored_size(data_len),
+ Self::AppendVec(_) => AppendVec::calculate_stored_size(data_len),
Self::TieredStorage(ts) => ts
.reader()
.expect("Reader must be initialized as stored size is specific to format")
@@ -460,11 +397,19 @@ pub enum AccountsFileProvider {
}
impl AccountsFileProvider {
- pub fn new_writable(&self, path: impl Into, file_size: u64) -> AccountsFile {
+ pub fn new_writable(
+ &self,
+ path: impl Into,
+ file_size: u64,
+ storage_access: StorageAccess,
+ ) -> AccountsFile {
match self {
- Self::AppendVec => {
- AccountsFile::AppendVec(AppendVec::new(path, true, file_size as usize))
- }
+ Self::AppendVec => AccountsFile::AppendVec(AppendVec::new(
+ path,
+ true,
+ file_size as usize,
+ storage_access,
+ )),
Self::HotStorage => AccountsFile::TieredStorage(TieredStorage::new_writable(path)),
}
}
@@ -487,16 +432,3 @@ pub struct StoredAccountsInfo {
/// total size of all the stored accounts
pub size: usize,
}
-
-#[cfg(test)]
-pub mod tests {
- use crate::accounts_file::AccountsFile;
- impl AccountsFile {
- pub(crate) fn set_current_len_for_tests(&self, len: usize) {
- match self {
- Self::AppendVec(av) => av.set_current_len_for_tests(len),
- Self::TieredStorage(_) => {}
- }
- }
- }
-}
diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs
index 36abe9c90321c3..ccbb37b8fe5ca9 100644
--- a/accounts-db/src/accounts_index.rs
+++ b/accounts-db/src/accounts_index.rs
@@ -24,6 +24,7 @@ use {
rayon::iter::{IntoParallelIterator, ParallelIterator},
roots_tracker::RootsTracker,
secondary::{RwLockSecondaryIndexEntry, SecondaryIndex, SecondaryIndexEntry},
+ smallvec::SmallVec,
solana_account::ReadableAccount,
solana_clock::{BankId, Slot},
solana_measure::measure::Measure,
@@ -35,7 +36,7 @@ use {
ops::{Bound, Range, RangeBounds},
path::PathBuf,
sync::{
- atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
+ atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
},
},
@@ -70,9 +71,16 @@ pub const ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS: AccountsIndexConfig = AccountsIn
scan_results_limit_bytes: None,
};
pub type ScanResult = Result;
-pub type SlotList = Vec<(Slot, T)>;
-pub type RefCount = u64;
-pub type AtomicRefCount = AtomicU64;
+pub type SlotList = SmallVec<[(Slot, T); 1]>;
+pub type ReclaimsSlotList = Vec<(Slot, T)>;
+
+// The ref count cannot be higher than the total number of storages, and we should never have more
+// than 1 million storages. A 32-bit ref count should be *significantly* more than enough.
+// (We already effectively limit the number of storages to 2^32 since the storage ID type is a u32.)
+// The majority of accounts should only exist in one storage, so the most common ref count is '1'.
+// Heavily updated accounts should still have a ref count that is < 100.
+pub type RefCount = u32;
+pub type AtomicRefCount = AtomicU32;
/// values returned from `insert_new_if_missing_into_primary_index()`
#[derive(Default, Debug, PartialEq, Eq)]
@@ -122,6 +130,9 @@ pub enum UpsertReclaim {
PopulateReclaims,
/// overwrite existing data in the same slot and do not return in 'reclaims'
IgnoreReclaims,
+ /// Reclaim all older versions of the account from the index and return
+ /// in the 'reclaims'
+ ReclaimOldSlots,
}
#[derive(Debug)]
@@ -668,21 +679,6 @@ impl + Into> AccountsIndex {
}
}
- #[cfg(feature = "dev-context-only-utils")]
- pub fn do_unchecked_scan_accounts(
- &self,
- metric_name: &'static str,
- ancestors: &Ancestors,
- func: F,
- range: Option,
- config: &ScanConfig,
- ) where
- F: FnMut(&Pubkey, (&T, Slot)),
- R: RangeBounds + std::fmt::Debug,
- {
- self.do_scan_accounts(metric_name, ancestors, func, range, None, config);
- }
-
// Scan accounts and return latest version of each account that is either:
// 1) rooted or
// 2) present in ancestors
@@ -869,15 +865,15 @@ impl + Into> AccountsIndex {
#[must_use]
pub fn handle_dead_keys(
&self,
- dead_keys: &[&Pubkey],
+ dead_keys: &[Pubkey],
account_indexes: &AccountSecondaryIndexes,
) -> HashSet {
let mut pubkeys_removed_from_accounts_index = HashSet::default();
if !dead_keys.is_empty() {
for key in dead_keys.iter() {
let w_index = self.get_bin(key);
- if w_index.remove_if_slot_list_empty(**key) {
- pubkeys_removed_from_accounts_index.insert(**key);
+ if w_index.remove_if_slot_list_empty(*key) {
+ pubkeys_removed_from_accounts_index.insert(*key);
// Note it's only safe to remove all the entries for this key
// because we have the lock for this key's entry in the AccountsIndex,
// so no other thread is also updating the index
@@ -910,24 +906,6 @@ impl + Into> AccountsIndex {
)
}
- #[cfg(feature = "dev-context-only-utils")]
- pub(crate) fn unchecked_scan_accounts(
- &self,
- metric_name: &'static str,
- ancestors: &Ancestors,
- func: F,
- config: &ScanConfig,
- ) where
- F: FnMut(&Pubkey, (&T, Slot)),
- {
- self.do_unchecked_scan_accounts(
- metric_name,
- ancestors,
- func,
- None::>,
- config,
- );
- }
/// call func with every pubkey and index visible from a given set of ancestors
pub(crate) fn index_scan_accounts(
&self,
@@ -968,15 +946,12 @@ impl + Into> AccountsIndex {
/// returns true if, after this fn call:
/// accounts index entry for `pubkey` has an empty slot list
/// or `pubkey` does not exist in accounts index
- pub(crate) fn purge_exact<'a, C>(
- &'a self,
+ pub(crate) fn purge_exact(
+ &self,
pubkey: &Pubkey,
- slots_to_purge: &'a C,
- reclaims: &mut SlotList,
- ) -> bool
- where
- C: Contains<'a, Slot>,
- {
+ slots_to_purge: impl for<'a> Contains<'a, Slot>,
+ reclaims: &mut ReclaimsSlotList,
+ ) -> bool {
self.slot_list_mut(pubkey, |slot_list| {
slot_list.retain(|(slot, item)| {
let should_purge = slots_to_purge.contains(slot);
@@ -1364,6 +1339,7 @@ impl + Into> AccountsIndex {
.then_with(|| pubkey_a.cmp(pubkey_b))
});
+ let storage = self.storage.storage.as_ref();
while !items.is_empty() {
let mut start_index = items.len() - 1;
let mut last_pubkey = &items[start_index].0;
@@ -1383,7 +1359,7 @@ impl + Into> AccountsIndex {
last_pubkey = next_pubkey;
}
- let r_account_maps = &self.account_maps[pubkey_bin];
+ let r_account_maps = self.account_maps[pubkey_bin].as_ref();
// count only considers non-duplicate accounts
count += items.len() - start_index;
@@ -1395,12 +1371,8 @@ impl + Into> AccountsIndex {
// this is no longer the default case
let mut duplicates_from_in_memory = vec![];
items.for_each(|(pubkey, account_info)| {
- let new_entry = PreAllocatedAccountMapEntry::new(
- slot,
- account_info,
- &self.storage.storage,
- use_disk,
- );
+ let new_entry =
+ PreAllocatedAccountMapEntry::new(slot, account_info, storage, use_disk);
match r_account_maps.insert_new_entry_if_missing_with_lock(pubkey, new_entry) {
InsertNewEntryResults::DidNotExist => {
num_did_not_exist += 1;
@@ -1471,7 +1443,7 @@ impl + Into> AccountsIndex {
account: &impl ReadableAccount,
account_indexes: &AccountSecondaryIndexes,
account_info: T,
- reclaims: &mut SlotList,
+ reclaims: &mut ReclaimsSlotList,
reclaim: UpsertReclaim,
) {
// vast majority of updates are to item already in accounts index, so store as raw to avoid unnecessary allocations
@@ -1531,7 +1503,7 @@ impl + Into> AccountsIndex {
fn purge_older_root_entries(
&self,
slot_list: &mut SlotList,
- reclaims: &mut SlotList,
+ reclaims: &mut ReclaimsSlotList,
max_clean_root_inclusive: Option,
) {
if slot_list.len() <= 1 {
@@ -1572,7 +1544,7 @@ impl + Into> AccountsIndex {
pub fn clean_rooted_entries(
&self,
pubkey: &Pubkey,
- reclaims: &mut SlotList,
+ reclaims: &mut ReclaimsSlotList,
max_clean_root_inclusive: Option,
) -> bool {
let mut is_slot_list_empty = false;
@@ -1603,7 +1575,7 @@ impl + Into> AccountsIndex {
fn clean_and_unref_slot_list_on_startup(
&self,
entry: &AccountMapEntry,
- reclaims: &mut SlotList,
+ reclaims: &mut ReclaimsSlotList,
) -> (u64, T) {
let mut slot_list = entry.slot_list.write().unwrap();
let max_slot = slot_list
@@ -1648,9 +1620,8 @@ impl + Into> AccountsIndex {
pub fn clean_and_unref_rooted_entries_by_bin(
&self,
pubkeys_by_bin: &[Pubkey],
- callback: impl Fn(Slot, T),
- ) -> SlotList {
- let mut reclaims = Vec::new();
+ ) -> ReclaimsSlotList {
+ let mut reclaims = ReclaimsSlotList::new();
let map = match pubkeys_by_bin.first() {
Some(pubkey) => self.get_bin(pubkey),
@@ -1660,9 +1631,7 @@ impl + Into> AccountsIndex {
for pubkey in pubkeys_by_bin {
map.get_internal_inner(pubkey, |entry| {
let entry = entry.expect("Expected entry to exist in accounts index");
- let (slot, account_info) =
- self.clean_and_unref_slot_list_on_startup(entry, &mut reclaims);
- callback(slot, account_info);
+ self.clean_and_unref_slot_list_on_startup(entry, &mut reclaims);
(false, ())
});
}
@@ -1795,6 +1764,7 @@ pub mod tests {
Bound::{Excluded, Included, Unbounded},
RangeInclusive,
},
+ test_case::test_matrix,
};
pub enum SecondaryIndexTypes<'a> {
@@ -1855,7 +1825,7 @@ pub mod tests {
age: AtomicAge::new(entry.age()),
};
PreAllocatedAccountMapEntry::Entry(Arc::new(AccountMapEntry::new(
- vec![(slot, account_info)],
+ SlotList::from([(slot, account_info)]),
entry.ref_count(),
meta,
)))
@@ -1875,12 +1845,14 @@ pub mod tests {
assert!(!index.contains_with(key, None, None));
let mut num = 0;
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |_pubkey, _index| num += 1,
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |_pubkey, _index| num += 1,
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 0);
}
@@ -1935,7 +1907,7 @@ pub mod tests {
fn test_insert_no_ancestors() {
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
index.upsert(
0,
0,
@@ -1953,12 +1925,14 @@ pub mod tests {
assert!(!index.contains_with(&key, None, None));
let mut num = 0;
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |_pubkey, _index| num += 1,
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |_pubkey, _index| num += 1,
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 0);
}
@@ -2015,22 +1989,26 @@ pub mod tests {
assert!(!index.contains_with(pubkey, None, None));
let mut num = 0;
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |_pubkey, _index| num += 1,
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |_pubkey, _index| num += 1,
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 0);
ancestors.insert(slot, 0);
assert!(index.contains_with(pubkey, Some(&ancestors), None));
assert_eq!(index.ref_count_from_storage(pubkey), 1);
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |_pubkey, _index| num += 1,
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |_pubkey, _index| num += 1,
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 1);
// not zero lamports
@@ -2048,22 +2026,26 @@ pub mod tests {
assert!(!index.contains_with(pubkey, None, None));
let mut num = 0;
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |_pubkey, _index| num += 1,
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |_pubkey, _index| num += 1,
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 0);
ancestors.insert(slot, 0);
assert!(index.contains_with(pubkey, Some(&ancestors), None));
assert_eq!(index.ref_count_from_storage(pubkey), 1);
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |_pubkey, _index| num += 1,
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |_pubkey, _index| num += 1,
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 1);
}
@@ -2091,8 +2073,7 @@ pub mod tests {
let index: AccountsIndex = AccountsIndex::::default_for_tests();
let pubkeys_by_bin: Vec = vec![];
- let reclaims =
- index.clean_and_unref_rooted_entries_by_bin(&pubkeys_by_bin, |_slot, _info| {});
+ let reclaims = index.clean_and_unref_rooted_entries_by_bin(&pubkeys_by_bin);
assert!(reclaims.is_empty());
}
@@ -2104,7 +2085,7 @@ pub mod tests {
let slot = 0;
let account_info = true;
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
index.upsert(
slot,
slot,
@@ -2118,10 +2099,7 @@ pub mod tests {
assert!(gc.is_empty());
- let reclaims = index.clean_and_unref_rooted_entries_by_bin(&[pubkey], |slot, info| {
- assert_eq!(slot, 0);
- assert!(info);
- });
+ let reclaims = index.clean_and_unref_rooted_entries_by_bin(&[pubkey]);
assert_eq!(reclaims.len(), 0);
}
@@ -2135,7 +2113,7 @@ pub mod tests {
let account_info1 = 0;
let account_info2 = 1;
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
for (slot, account_info) in [(slot1, account_info1), (slot2, account_info2)] {
index.upsert(
slot,
@@ -2151,12 +2129,8 @@ pub mod tests {
assert!(gc.is_empty());
- let reclaims = index.clean_and_unref_rooted_entries_by_bin(&[pubkey], |slot, info| {
- assert_eq!(slot, slot2);
- assert_eq!(info, account_info2);
- });
-
- assert_eq!(reclaims, vec![(slot1, account_info1)]);
+ let reclaims = index.clean_and_unref_rooted_entries_by_bin(&[pubkey]);
+ assert_eq!(reclaims, ReclaimsSlotList::from([(slot1, account_info1)]));
}
#[test]
@@ -2164,8 +2138,8 @@ pub mod tests {
let index: AccountsIndex = AccountsIndex::::default_for_tests();
let bin_index = 0;
let mut pubkeys = Vec::new();
- let mut expected_reclaims = Vec::new();
- let mut gc: Vec<(u64, bool)> = Vec::new();
+ let mut expected_reclaims = ReclaimsSlotList::new();
+ let mut gc = ReclaimsSlotList::new();
while pubkeys.len() < 10 {
let new_pubkey = solana_pubkey::new_rand();
@@ -2198,7 +2172,7 @@ pub mod tests {
assert!(gc.is_empty());
- let mut reclaims = index.clean_and_unref_rooted_entries_by_bin(&pubkeys, |_slot, _info| {});
+ let mut reclaims = index.clean_and_unref_rooted_entries_by_bin(&pubkeys);
reclaims.sort_unstable();
expected_reclaims.sort_unstable();
@@ -2281,10 +2255,10 @@ pub mod tests {
fn test_new_entry_code_paths_helper(
account_infos: [T; 2],
is_cached: bool,
- upsert: bool,
+ upsert_method: Option,
use_disk: bool,
) {
- if is_cached && !upsert {
+ if is_cached && upsert_method.is_none() {
// This is an illegal combination when we are using queued lazy inserts.
// Cached items don't ever leave the in-mem cache.
// But the queued lazy insert code relies on there being nothing in the in-mem cache.
@@ -2302,27 +2276,30 @@ pub mod tests {
IndexLimitMb::InMemOnly // in-mem only
};
let index = AccountsIndex::::new(&config, Arc::default());
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
- if upsert {
- // insert first entry for pubkey. This will use new_entry_after_update and not call update.
- index.upsert(
- slot0,
- slot0,
- &key,
- &AccountSharedData::default(),
- &AccountSecondaryIndexes::default(),
- account_infos[0],
- &mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
- );
- } else {
- let items = vec![(key, account_infos[0])];
- index.set_startup(Startup::Startup);
- let expected_len = items.len();
- let (_, result) = index.insert_new_if_missing_into_primary_index(slot0, items);
- assert_eq!(result.count, expected_len);
- index.set_startup(Startup::Normal);
+ match upsert_method {
+ Some(upsert_method) => {
+ // insert first entry for pubkey. This will use new_entry_after_update and not call update.
+ index.upsert(
+ slot0,
+ slot0,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ account_infos[0],
+ &mut gc,
+ upsert_method,
+ );
+ }
+ None => {
+ let items = vec![(key, account_infos[0])];
+ index.set_startup(Startup::Startup);
+ let expected_len = items.len();
+ let (_, result) = index.insert_new_if_missing_into_primary_index(slot0, items);
+ assert_eq!(result.count, expected_len);
+ index.set_startup(Startup::Normal);
+ }
}
assert!(gc.is_empty());
@@ -2330,7 +2307,7 @@ pub mod tests {
{
let entry = index.get_cloned(&key).unwrap();
let slot_list = entry.slot_list.read().unwrap();
- assert_eq!(entry.ref_count(), u64::from(!is_cached));
+ assert_eq!(entry.ref_count(), RefCount::from(!is_cached));
assert_eq!(slot_list.as_slice(), &[(slot0, account_infos[0])]);
let new_entry = PreAllocatedAccountMapEntry::new(
slot0,
@@ -2345,43 +2322,63 @@ pub mod tests {
);
}
- // insert second entry for pubkey. This will use update and NOT use new_entry_after_update.
- if upsert {
- index.upsert(
- slot1,
- slot1,
- &key,
- &AccountSharedData::default(),
- &AccountSecondaryIndexes::default(),
- account_infos[1],
- &mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
- );
- } else {
- // this has the effect of aging out everything in the in-mem cache
- for _ in 0..5 {
+ match upsert_method {
+ Some(upsert_method) => {
+ // insert second entry for pubkey. This will use update and NOT use new_entry_after_update.
+ index.upsert(
+ slot1,
+ slot1,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ account_infos[1],
+ &mut gc,
+ upsert_method,
+ );
+ }
+ None => {
+ // this has the effect of aging out everything in the in-mem cache
+ for _ in 0..5 {
+ index.set_startup(Startup::Startup);
+ index.set_startup(Startup::Normal);
+ }
+
+ let items = vec![(key, account_infos[1])];
index.set_startup(Startup::Startup);
+ let expected_len = items.len();
+ let (_, result) = index.insert_new_if_missing_into_primary_index(slot1, items);
+ assert_eq!(result.count, expected_len);
index.set_startup(Startup::Normal);
}
+ }
+
+ // There should be reclaims if entries are uncached and old slots are being reclaimed
+ let should_have_reclaims =
+ upsert_method == Some(UpsertReclaim::ReclaimOldSlots) && !is_cached;
- let items = vec![(key, account_infos[1])];
- index.set_startup(Startup::Startup);
- let expected_len = items.len();
- let (_, result) = index.insert_new_if_missing_into_primary_index(slot1, items);
- assert_eq!(result.count, expected_len);
- index.set_startup(Startup::Normal);
+ if should_have_reclaims {
+ assert!(!gc.is_empty());
+ assert_eq!(gc.len(), 1);
+ assert_eq!(gc[0], (slot0, account_infos[0]));
+ } else {
+ assert!(gc.is_empty());
}
- assert!(gc.is_empty());
+
index.populate_and_retrieve_duplicate_keys_from_startup(|_slot_keys| {});
let entry = index.get_cloned(&key).unwrap();
let slot_list = entry.slot_list.read().unwrap();
- assert_eq!(entry.ref_count(), if is_cached { 0 } else { 2 });
- assert_eq!(
- slot_list.as_slice(),
- &[(slot0, account_infos[0]), (slot1, account_infos[1])],
- );
+ if should_have_reclaims {
+ assert_eq!(entry.ref_count(), 1);
+ assert_eq!(slot_list.as_slice(), &[(slot1, account_infos[1])],);
+ } else {
+ assert_eq!(entry.ref_count(), if is_cached { 0 } else { 2 });
+ assert_eq!(
+ slot_list.as_slice(),
+ &[(slot0, account_infos[0]), (slot1, account_infos[1])],
+ );
+ }
let new_entry = PreAllocatedAccountMapEntry::new(
slot1,
@@ -2389,19 +2386,26 @@ pub mod tests {
&index.storage.storage,
false,
);
- assert_eq!(slot_list[1], new_entry.into());
- }
- #[test]
- fn test_new_entry_and_update_code_paths() {
- for use_disk in [false, true] {
- for is_upsert in &[false, true] {
- // account_info type that IS cached
- test_new_entry_code_paths_helper([1.0, 2.0], true, *is_upsert, use_disk);
+ assert_eq!(slot_list.last().unwrap(), &new_entry.into());
+ }
- // account_info type that is NOT cached
- test_new_entry_code_paths_helper([1, 2], false, *is_upsert, use_disk);
- }
+ #[test_matrix(
+ [false, true],
+ [None, Some(UpsertReclaim::PopulateReclaims), Some(UpsertReclaim::ReclaimOldSlots)],
+ [true, false]
+ )]
+ fn test_new_entry_and_update_code_paths(
+ use_disk: bool,
+ upsert_method: Option,
+ is_cached: bool,
+ ) {
+ if is_cached {
+ // account_info type that IS cached
+ test_new_entry_code_paths_helper([1.0, 2.0], true, upsert_method, use_disk);
+ } else {
+ // account_info type that is NOT cached
+ test_new_entry_code_paths_helper([1, 2], false, upsert_method, use_disk);
}
}
@@ -2423,7 +2427,7 @@ pub mod tests {
&key,
new_entry,
None,
- &mut SlotList::default(),
+ &mut ReclaimsSlotList::default(),
UPSERT_RECLAIM_TEST_DEFAULT,
);
assert_eq!(1, account_maps_stats_len(&index));
@@ -2433,21 +2437,25 @@ pub mod tests {
assert!(!index.contains_with(&key, None, None));
let mut num = 0;
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |_pubkey, _index| num += 1,
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |_pubkey, _index| num += 1,
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 0);
ancestors.insert(slot, 0);
assert!(index.contains_with(&key, Some(&ancestors), None));
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |_pubkey, _index| num += 1,
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |_pubkey, _index| num += 1,
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 1);
}
@@ -2455,7 +2463,7 @@ pub mod tests {
fn test_insert_wrong_ancestors() {
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
index.upsert(
0,
0,
@@ -2472,12 +2480,14 @@ pub mod tests {
assert!(!index.contains_with(&key, Some(&ancestors), None));
let mut num = 0;
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |_pubkey, _index| num += 1,
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |_pubkey, _index| num += 1,
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 0);
}
#[test]
@@ -2486,7 +2496,7 @@ pub mod tests {
// non-cached
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
- let mut reclaims = Vec::new();
+ let mut reclaims = ReclaimsSlotList::new();
let slot = 0;
let value = 1;
assert!(!value.is_cached());
@@ -2532,7 +2542,7 @@ pub mod tests {
// cached
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
- let mut reclaims = Vec::new();
+ let mut reclaims = ReclaimsSlotList::new();
let slot = 0;
let value = 1.0;
assert!(value.is_cached());
@@ -2579,7 +2589,7 @@ pub mod tests {
fn test_insert_with_ancestors() {
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
index.upsert(
0,
0,
@@ -2608,17 +2618,20 @@ pub mod tests {
let mut num = 0;
let mut found_key = false;
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |pubkey, _index| {
- if pubkey == &key {
- found_key = true
- };
- num += 1
- },
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &ancestors,
+ 0,
+ |pubkey, _index| {
+ if pubkey == &key {
+ found_key = true
+ };
+ num += 1
+ },
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
+
assert_eq!(num, 1);
assert!(found_key);
}
@@ -2636,7 +2649,7 @@ pub mod tests {
&AccountSharedData::default(),
&AccountSecondaryIndexes::default(),
true,
- &mut vec![],
+ &mut ReclaimsSlotList::new(),
UPSERT_RECLAIM_TEST_DEFAULT,
);
new_pubkey
@@ -2653,7 +2666,7 @@ pub mod tests {
&AccountSharedData::default(),
&AccountSecondaryIndexes::default(),
true,
- &mut vec![],
+ &mut ReclaimsSlotList::new(),
UPSERT_RECLAIM_TEST_DEFAULT,
);
}
@@ -2665,17 +2678,18 @@ pub mod tests {
fn run_test_scan_accounts(num_pubkeys: usize) {
let (index, _) = setup_accounts_index_keys(num_pubkeys);
- let ancestors = Ancestors::default();
let mut scanned_keys = HashSet::new();
- index.unchecked_scan_accounts(
- "",
- &ancestors,
- |pubkey, _index| {
- scanned_keys.insert(*pubkey);
- },
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &Ancestors::default(),
+ 0,
+ |pubkey, _index| {
+ scanned_keys.insert(*pubkey);
+ },
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(scanned_keys.len(), num_pubkeys);
}
@@ -2700,7 +2714,7 @@ pub mod tests {
fn test_insert_with_root() {
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
index.upsert(
0,
0,
@@ -2748,7 +2762,7 @@ pub mod tests {
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
let ancestors = vec![(0, 0)].into_iter().collect();
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
index.upsert(
0,
0,
@@ -2773,7 +2787,7 @@ pub mod tests {
)
.unwrap();
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
index.upsert(
0,
0,
@@ -2784,7 +2798,7 @@ pub mod tests {
&mut gc,
UPSERT_RECLAIM_TEST_DEFAULT,
);
- assert_eq!(gc, vec![(0, 1)]);
+ assert_eq!(gc, ReclaimsSlotList::from([(0, 1)]));
index
.get_with_and_then(
&key,
@@ -2805,7 +2819,7 @@ pub mod tests {
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
let ancestors = vec![(0, 0)].into_iter().collect();
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
index.upsert(
0,
0,
@@ -2814,7 +2828,7 @@ pub mod tests {
&AccountSecondaryIndexes::default(),
true,
&mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
+ UpsertReclaim::PopulateReclaims,
);
assert!(gc.is_empty());
index.upsert(
@@ -2825,7 +2839,7 @@ pub mod tests {
&AccountSecondaryIndexes::default(),
false,
&mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
+ UpsertReclaim::PopulateReclaims,
);
assert!(gc.is_empty());
index
@@ -2859,7 +2873,7 @@ pub mod tests {
fn test_update_gc_purged_slot() {
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
index.upsert(
0,
0,
@@ -2868,7 +2882,7 @@ pub mod tests {
&AccountSecondaryIndexes::default(),
true,
&mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
+ UpsertReclaim::PopulateReclaims,
);
assert!(gc.is_empty());
index.upsert(
@@ -2879,7 +2893,7 @@ pub mod tests {
&AccountSecondaryIndexes::default(),
false,
&mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
+ UpsertReclaim::PopulateReclaims,
);
index.upsert(
2,
@@ -2889,7 +2903,7 @@ pub mod tests {
&AccountSecondaryIndexes::default(),
true,
&mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
+ UpsertReclaim::PopulateReclaims,
);
index.upsert(
3,
@@ -2899,7 +2913,7 @@ pub mod tests {
&AccountSecondaryIndexes::default(),
true,
&mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
+ UpsertReclaim::PopulateReclaims,
);
index.add_root(0);
index.add_root(1);
@@ -2912,12 +2926,12 @@ pub mod tests {
&AccountSecondaryIndexes::default(),
true,
&mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
+ UpsertReclaim::PopulateReclaims,
);
// Updating index should not purge older roots, only purges
// previous updates within the same slot
- assert_eq!(gc, vec![]);
+ assert_eq!(gc, ReclaimsSlotList::new());
index
.get_with_and_then(&key, None, None, false, |(slot, account_info)| {
assert_eq!(slot, 3);
@@ -2927,22 +2941,84 @@ pub mod tests {
let mut num = 0;
let mut found_key = false;
- index.unchecked_scan_accounts(
- "",
- &Ancestors::default(),
- |pubkey, index| {
- if pubkey == &key {
- found_key = true;
- assert_eq!(index, (&true, 3));
- };
- num += 1
- },
- &ScanConfig::default(),
- );
+ index
+ .scan_accounts(
+ &Ancestors::default(),
+ 0,
+ |pubkey, index| {
+ if pubkey == &key {
+ found_key = true;
+ assert_eq!(index, (&true, 3));
+ };
+ num += 1
+ },
+ &ScanConfig::default(),
+ )
+ .expect("scan should succeed");
assert_eq!(num, 1);
assert!(found_key);
}
+ #[test]
+ fn test_upsert_reclaims() {
+ let key = solana_pubkey::new_rand();
+ let index =
+ AccountsIndex::::default_for_tests();
+ let mut reclaims = ReclaimsSlotList::new();
+ index.upsert(
+ 0,
+ 0,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ CacheableIndexValueTest(true),
+ &mut reclaims,
+ UPSERT_RECLAIM_TEST_DEFAULT,
+ );
+ // No reclaims should be returned on the first item
+ assert!(reclaims.is_empty());
+
+ index.upsert(
+ 0,
+ 0,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ CacheableIndexValueTest(false),
+ &mut reclaims,
+ UPSERT_RECLAIM_TEST_DEFAULT,
+ );
+ // Cached item should not be reclaimed
+ assert!(reclaims.is_empty());
+
+ // Slot list should only have a single entry
+ // Using brackets to limit scope of read lock
+ {
+ let entry = index.get_cloned(&key).unwrap();
+ let slot_list = entry.slot_list.read().unwrap();
+ assert_eq!(slot_list.len(), 1);
+ }
+
+ index.upsert(
+ 0,
+ 0,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ CacheableIndexValueTest(false),
+ &mut reclaims,
+ UPSERT_RECLAIM_TEST_DEFAULT,
+ );
+
+ // Uncached item should be returned as reclaim
+ assert!(!reclaims.is_empty());
+
+ // Slot list should only have a single entry
+ let entry = index.get_cloned(&key).unwrap();
+ let slot_list = entry.slot_list.read().unwrap();
+ assert_eq!(slot_list.len(), 1);
+ }
+
fn account_maps_stats_len(index: &AccountsIndex) -> usize {
index.storage.storage.stats.total_count()
}
@@ -2951,7 +3027,7 @@ pub mod tests {
fn test_purge() {
let key = solana_pubkey::new_rand();
let index = AccountsIndex::::default_for_tests();
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
assert_eq!(0, account_maps_stats_len(&index));
index.upsert(
1,
@@ -2978,11 +3054,11 @@ pub mod tests {
assert_eq!(1, account_maps_stats_len(&index));
let purges = index.purge_roots(&key);
- assert_eq!(purges, (vec![], false));
+ assert_eq!(purges, (SlotList::new(), false));
index.add_root(1);
let purges = index.purge_roots(&key);
- assert_eq!(purges, (vec![(1, 10)], true));
+ assert_eq!(purges, (SlotList::from([(1, 10)]), true));
assert_eq!(1, account_maps_stats_len(&index));
index.upsert(
@@ -3081,7 +3157,7 @@ pub mod tests {
),
secondary_indexes,
true,
- &mut vec![],
+ &mut ReclaimsSlotList::new(),
UPSERT_RECLAIM_TEST_DEFAULT,
);
}
@@ -3105,15 +3181,191 @@ pub mod tests {
index.purge_exact(
&account_key,
- &slots.into_iter().collect::>(),
- &mut vec![],
+ slots.into_iter().collect::>(),
+ &mut ReclaimsSlotList::new(),
);
- let _ = index.handle_dead_keys(&[&account_key], secondary_indexes);
+ let _ = index.handle_dead_keys(&[account_key], secondary_indexes);
assert!(secondary_index.index.is_empty());
assert!(secondary_index.reverse_index.is_empty());
}
+ #[test]
+ fn test_reclaim_older_items_in_slot_list() {
+ solana_logger::setup();
+ let key = solana_pubkey::new_rand();
+ let index = AccountsIndex::::default_for_tests();
+ let mut gc = ReclaimsSlotList::new();
+ let reclaim_slot = 5;
+ let account_value = 50;
+
+ // Insert multiple older items into the slot list
+ for slot in 0..reclaim_slot {
+ index.upsert(
+ slot,
+ slot,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ slot,
+ &mut gc,
+ UpsertReclaim::IgnoreReclaims,
+ );
+ }
+ let entry = index.get_cloned(&key).unwrap();
+ assert_eq!(entry.slot_list.read().unwrap().len(), reclaim_slot as usize);
+
+ // Insert an item newer than the one that we will reclaim old slots on
+ index.upsert(
+ reclaim_slot + 1,
+ reclaim_slot + 1,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ account_value + 1,
+ &mut gc,
+ UpsertReclaim::IgnoreReclaims,
+ );
+ let entry = index.get_cloned(&key).unwrap();
+ assert_eq!(
+ entry.slot_list.read().unwrap().len(),
+ (reclaim_slot + 1) as usize
+ );
+
+ // Reclaim all older slots
+ index.upsert(
+ reclaim_slot,
+ reclaim_slot,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ account_value,
+ &mut gc,
+ UpsertReclaim::ReclaimOldSlots,
+ );
+
+ // Verify that older items are reclaimed
+ assert_eq!(gc.len(), reclaim_slot as usize);
+ for (slot, value) in gc.iter() {
+ assert!(*slot < reclaim_slot);
+ assert_eq!(*value, *slot);
+ }
+
+ // Verify that the item added is in the slot list
+ let ancestors = vec![(reclaim_slot, 0)].into_iter().collect();
+ index
+ .get_with_and_then(
+ &key,
+ Some(&ancestors),
+ None,
+ false,
+ |(slot, account_info)| {
+ assert_eq!(slot, reclaim_slot);
+ assert_eq!(account_info, account_value);
+ },
+ )
+ .unwrap();
+
+ // Verify that the newer item remains in the slot list
+ let ancestors = vec![((reclaim_slot + 1), 0)].into_iter().collect();
+ index
+ .get_with_and_then(
+ &key,
+ Some(&ancestors),
+ None,
+ false,
+ |(slot, account_info)| {
+ assert_eq!(slot, reclaim_slot + 1);
+ assert_eq!(account_info, account_value + 1);
+ },
+ )
+ .unwrap();
+ }
+
+ #[test]
+ fn test_reclaim_do_not_reclaim_cached_other_slot() {
+ solana_logger::setup();
+ let key = solana_pubkey::new_rand();
+ let index =
+ AccountsIndex::::default_for_tests();
+ let mut gc = ReclaimsSlotList::new();
+
+ // Insert an uncached account at slot 0 and a cached account at slot 1
+ index.upsert(
+ 0,
+ 0,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ CacheableIndexValueTest(false),
+ &mut gc,
+ UpsertReclaim::IgnoreReclaims,
+ );
+
+ index.upsert(
+ 1,
+ 1,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ CacheableIndexValueTest(true),
+ &mut gc,
+ UpsertReclaim::IgnoreReclaims,
+ );
+
+ // Now insert a cached account at slot 2
+ index.upsert(
+ 2,
+ 2,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ CacheableIndexValueTest(true),
+ &mut gc,
+ UpsertReclaim::IgnoreReclaims,
+ );
+
+ // Replace the cached account at slot 2 with an uncached account
+ index.upsert(
+ 2,
+ 2,
+ &key,
+ &AccountSharedData::default(),
+ &AccountSecondaryIndexes::default(),
+ CacheableIndexValueTest(false),
+ &mut gc,
+ UpsertReclaim::ReclaimOldSlots,
+ );
+
+ // Verify that the slot list is length two and consists of the cached account at slot 1
+ // and the uncached account at slot 2
+ let entry = index.get_cloned(&key).unwrap();
+ assert_eq!(entry.slot_list.read().unwrap().len(), 2);
+ assert_eq!(
+ entry.slot_list.read().unwrap()[0],
+ PreAllocatedAccountMapEntry::new(
+ 1,
+ CacheableIndexValueTest(true),
+ &index.storage.storage,
+ false
+ )
+ .into()
+ );
+ assert_eq!(
+ entry.slot_list.read().unwrap()[1],
+ PreAllocatedAccountMapEntry::new(
+ 2,
+ CacheableIndexValueTest(false),
+ &index.storage.storage,
+ false
+ )
+ .into()
+ );
+ // Verify that the uncached account at slot 0 was reclaimed
+ assert_eq!(gc.len(), 1);
+ assert_eq!(gc[0], (0, CacheableIndexValueTest(false)));
+ }
+
#[test]
fn test_purge_exact_spl_token_mint_secondary_index() {
let (key_start, key_end, secondary_indexes) = create_spl_token_mint_secondary_index_state();
@@ -3145,68 +3397,77 @@ pub mod tests {
fn test_purge_older_root_entries() {
// No roots, should be no reclaims
let index = AccountsIndex::::default_for_tests();
- let mut slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
- let mut reclaims = vec![];
+ let mut slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]);
+ let mut reclaims = ReclaimsSlotList::new();
index.purge_older_root_entries(&mut slot_list, &mut reclaims, None);
assert!(reclaims.is_empty());
- assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]);
+ assert_eq!(
+ slot_list,
+ SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)])
+ );
// Add a later root, earlier slots should be reclaimed
- slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
+ slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]);
index.add_root(1);
// Note 2 is not a root
index.add_root(5);
- reclaims = vec![];
+ reclaims = ReclaimsSlotList::new();
index.purge_older_root_entries(&mut slot_list, &mut reclaims, None);
- assert_eq!(reclaims, vec![(1, true), (2, true)]);
- assert_eq!(slot_list, vec![(5, true), (9, true)]);
+ assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)]));
+ assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)]));
// Add a later root that is not in the list, should not affect the outcome
- slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
+ slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]);
index.add_root(6);
- reclaims = vec![];
+ reclaims = ReclaimsSlotList::new();
index.purge_older_root_entries(&mut slot_list, &mut reclaims, None);
- assert_eq!(reclaims, vec![(1, true), (2, true)]);
- assert_eq!(slot_list, vec![(5, true), (9, true)]);
+ assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)]));
+ assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)]));
// Pass a max root >= than any root in the slot list, should not affect
// outcome
- slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
- reclaims = vec![];
+ slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]);
+ reclaims = ReclaimsSlotList::new();
index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(6));
- assert_eq!(reclaims, vec![(1, true), (2, true)]);
- assert_eq!(slot_list, vec![(5, true), (9, true)]);
+ assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)]));
+ assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)]));
// Pass a max root, earlier slots should be reclaimed
- slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
- reclaims = vec![];
+ slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]);
+ reclaims = ReclaimsSlotList::new();
index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(5));
- assert_eq!(reclaims, vec![(1, true), (2, true)]);
- assert_eq!(slot_list, vec![(5, true), (9, true)]);
+ assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)]));
+ assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)]));
// Pass a max root 2. This means the latest root < 2 is 1 because 2 is not a root
// so nothing will be purged
- slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
- reclaims = vec![];
+ slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]);
+ reclaims = ReclaimsSlotList::new();
index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(2));
assert!(reclaims.is_empty());
- assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]);
+ assert_eq!(
+ slot_list,
+ SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)])
+ );
// Pass a max root 1. This means the latest root < 3 is 1 because 2 is not a root
// so nothing will be purged
- slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
- reclaims = vec![];
+ slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]);
+ reclaims = ReclaimsSlotList::new();
index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(1));
assert!(reclaims.is_empty());
- assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]);
+ assert_eq!(
+ slot_list,
+ SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)])
+ );
// Pass a max root that doesn't exist in the list but is greater than
// some of the roots in the list, shouldn't return those smaller roots
- slot_list = vec![(1, true), (2, true), (5, true), (9, true)];
- reclaims = vec![];
+ slot_list = SlotList::from_iter([(1, true), (2, true), (5, true), (9, true)]);
+ reclaims = ReclaimsSlotList::new();
index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(7));
- assert_eq!(reclaims, vec![(1, true), (2, true)]);
- assert_eq!(slot_list, vec![(5, true), (9, true)]);
+ assert_eq!(reclaims, ReclaimsSlotList::from([(1, true), (2, true)]));
+ assert_eq!(slot_list, SlotList::from_iter([(5, true), (9, true)]));
}
fn check_secondary_index_mapping_correct(
@@ -3256,7 +3517,7 @@ pub mod tests {
&AccountSharedData::create(0, account_data.to_vec(), Pubkey::default(), false, 0),
&secondary_indexes,
true,
- &mut vec![],
+ &mut ReclaimsSlotList::new(),
UPSERT_RECLAIM_TEST_DEFAULT,
);
assert!(secondary_index.index.is_empty());
@@ -3270,7 +3531,7 @@ pub mod tests {
&AccountSharedData::create(0, account_data[1..].to_vec(), *token_id, false, 0),
&secondary_indexes,
true,
- &mut vec![],
+ &mut ReclaimsSlotList::new(),
UPSERT_RECLAIM_TEST_DEFAULT,
);
assert!(secondary_index.index.is_empty());
@@ -3328,7 +3589,7 @@ pub mod tests {
index.slot_list_mut(&account_key, |slot_list| slot_list.clear());
// Everything should be deleted
- let _ = index.handle_dead_keys(&[&account_key], &secondary_indexes);
+ let _ = index.handle_dead_keys(&[account_key], &secondary_indexes);
assert!(secondary_index.index.is_empty());
assert!(secondary_index.reverse_index.is_empty());
}
@@ -3395,7 +3656,7 @@ pub mod tests {
&AccountSharedData::create(0, account_data1.to_vec(), *token_id, false, 0),
secondary_indexes,
true,
- &mut vec![],
+ &mut ReclaimsSlotList::new(),
UPSERT_RECLAIM_TEST_DEFAULT,
);
@@ -3407,7 +3668,7 @@ pub mod tests {
&AccountSharedData::create(0, account_data2.to_vec(), *token_id, false, 0),
secondary_indexes,
true,
- &mut vec![],
+ &mut ReclaimsSlotList::new(),
UPSERT_RECLAIM_TEST_DEFAULT,
);
@@ -3427,7 +3688,7 @@ pub mod tests {
&AccountSharedData::create(0, account_data1.to_vec(), *token_id, false, 0),
secondary_indexes,
true,
- &mut vec![],
+ &mut ReclaimsSlotList::new(),
UPSERT_RECLAIM_TEST_DEFAULT,
);
assert_eq!(secondary_index.get(&secondary_key1), vec![account_key]);
@@ -3437,7 +3698,7 @@ pub mod tests {
// so both secondary keys will still be kept alive.
index.add_root(later_slot);
index.slot_list_mut(&account_key, |slot_list| {
- index.purge_older_root_entries(slot_list, &mut vec![], None)
+ index.purge_older_root_entries(slot_list, &mut ReclaimsSlotList::new(), None)
});
check_secondary_index_mapping_correct(
@@ -3448,9 +3709,9 @@ pub mod tests {
// Removing the remaining entry for this pubkey in the index should mark the
// pubkey as dead and finally remove all the secondary indexes
- let mut reclaims = vec![];
- index.purge_exact(&account_key, &later_slot, &mut reclaims);
- let _ = index.handle_dead_keys(&[&account_key], secondary_indexes);
+ let mut reclaims = ReclaimsSlotList::new();
+ index.purge_exact(&account_key, later_slot, &mut reclaims);
+ let _ = index.handle_dead_keys(&[account_key], secondary_indexes);
assert!(secondary_index.index.is_empty());
assert!(secondary_index.reverse_index.is_empty());
}
@@ -3513,6 +3774,24 @@ pub mod tests {
}
}
+ /// Type that supports caching for tests. Used to test upsert behaviour
+ /// when the slot list has mixed cached and uncached items.
+ #[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]
+ struct CacheableIndexValueTest(bool);
+ impl IndexValue for CacheableIndexValueTest {}
+ impl DiskIndexValue for CacheableIndexValueTest {}
+ impl IsCached for CacheableIndexValueTest {
+ fn is_cached(&self) -> bool {
+ // Return self value as whether the item is cached or not
+ self.0
+ }
+ }
+ impl IsZeroLamport for CacheableIndexValueTest {
+ fn is_zero_lamport(&self) -> bool {
+ false
+ }
+ }
+
#[test]
fn test_get_newest_root_in_slot_list() {
let index = AccountsIndex::::default_for_tests();
@@ -3586,7 +3865,16 @@ pub mod tests {
impl AccountsIndex {
fn upsert_simple_test(&self, key: &Pubkey, slot: Slot, value: T) {
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
+
+ // It is invalid to reclaim older slots if the slot being upserted
+ // is unrooted
+ let reclaim_method = if self.is_alive_root(slot) {
+ UPSERT_RECLAIM_TEST_DEFAULT
+ } else {
+ UpsertReclaim::IgnoreReclaims
+ };
+
self.upsert(
slot,
slot,
@@ -3595,7 +3883,7 @@ pub mod tests {
&AccountSecondaryIndexes::default(),
value,
&mut gc,
- UPSERT_RECLAIM_TEST_DEFAULT,
+ reclaim_method,
);
assert!(gc.is_empty());
}
@@ -3651,7 +3939,7 @@ pub mod tests {
let index = AccountsIndex::::default_for_tests();
let slot1 = 1;
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
// return true if we don't know anything about 'key_unknown'
// the item did not exist in the accounts index at all, so index is up to date
assert!(index.clean_rooted_entries(&key_unknown, &mut gc, None));
@@ -3666,9 +3954,9 @@ pub mod tests {
// this will delete the entry because it is <= max_root_inclusive and NOT a root
// note this has to be slot2 because of inclusive vs exclusive in the call to can_purge_older_entries
{
- let mut gc = Vec::new();
+ let mut gc = ReclaimsSlotList::new();
assert!(index.clean_rooted_entries(&key, &mut gc, Some(slot2)));
- assert_eq!(gc, vec![(slot1, value)]);
+ assert_eq!(gc, ReclaimsSlotList::from([(slot1, value)]));
}
// re-add it
@@ -3699,7 +3987,7 @@ pub mod tests {
{
{
let roots_tracker = &index.roots_tracker.read().unwrap();
- let slot_list = vec![(slot2, value)];
+ let slot_list = SlotList::from([(slot2, value)]);
assert_eq!(
0,
AccountsIndex::::get_newest_root_in_slot_list(
@@ -3712,7 +4000,7 @@ pub mod tests {
index.add_root(slot2);
{
let roots_tracker = &index.roots_tracker.read().unwrap();
- let slot_list = vec![(slot2, value)];
+ let slot_list = SlotList::from([(slot2, value)]);
assert_eq!(
slot2,
AccountsIndex::::get_newest_root_in_slot_list(
@@ -3734,12 +4022,12 @@ pub mod tests {
assert!(gc.is_empty());
assert!(!index.clean_rooted_entries(&key, &mut gc, Some(slot2)));
- assert_eq!(gc, vec![(slot1, value)]);
+ assert_eq!(gc, ReclaimsSlotList::from([(slot1, value)]));
gc.clear();
index.clean_dead_slot(slot2);
let slot3 = 3;
assert!(index.clean_rooted_entries(&key, &mut gc, Some(slot3)));
- assert_eq!(gc, vec![(slot2, value)]);
+ assert_eq!(gc, ReclaimsSlotList::from([(slot2, value)]));
}
#[test]
@@ -3748,7 +4036,7 @@ pub mod tests {
let index = AccountsIndex::::default_for_tests();
assert_eq!(
- index.handle_dead_keys(&[&key], &AccountSecondaryIndexes::default()),
+ index.handle_dead_keys(&[key], &AccountSecondaryIndexes::default()),
vec![key].into_iter().collect::>()
);
}
diff --git a/accounts-db/src/accounts_index/account_map_entry.rs b/accounts-db/src/accounts_index/account_map_entry.rs
index 85b2a984286afa..edad7b78456cec 100644
--- a/accounts-db/src/accounts_index/account_map_entry.rs
+++ b/accounts-db/src/accounts_index/account_map_entry.rs
@@ -13,7 +13,7 @@ use {
/// one entry in the in-mem accounts index
/// Represents the value for an account key in the in-memory accounts index
-#[derive(Debug, Default)]
+#[derive(Debug)]
pub struct AccountMapEntry {
/// number of alive slots that contain >= 1 instances of account data for this pubkey
/// where alive represents a slot that has not yet been removed by clean via AccountsDB::clean_stored_dead_slots() for containing no up to date account information
@@ -34,12 +34,24 @@ impl AccountMapEntry {
meta,
}
}
+
+ #[cfg(test)]
+ pub(super) fn empty_for_tests() -> Self {
+ Self {
+ slot_list: RwLock::default(),
+ ref_count: AtomicRefCount::default(),
+ meta: AccountMapEntryMeta::default(),
+ }
+ }
+
pub fn ref_count(&self) -> RefCount {
self.ref_count.load(Ordering::Acquire)
}
pub fn addref(&self) {
- self.ref_count.fetch_add(1, Ordering::Release);
+ let previous = self.ref_count.fetch_add(1, Ordering::Release);
+ // ensure ref count does not overflow
+ assert_ne!(previous, RefCount::MAX);
self.set_dirty(true);
}
@@ -52,7 +64,7 @@ impl AccountMapEntry {
/// decrement the ref count by the passed in amount
/// return the refcount prior to the ref count change
- pub fn unref_by_count(&self, count: u64) -> RefCount {
+ pub fn unref_by_count(&self, count: RefCount) -> RefCount {
let previous = self.ref_count.fetch_sub(count, Ordering::Release);
self.set_dirty(true);
assert!(
@@ -109,7 +121,7 @@ pub struct AccountMapEntryMeta {
impl AccountMapEntryMeta {
pub fn new_dirty + Into>(
- storage: &Arc>,
+ storage: &BucketMapHolder,
is_cached: bool,
) -> Self {
AccountMapEntryMeta {
@@ -118,7 +130,7 @@ impl AccountMapEntryMeta {
}
}
pub fn new_clean + Into>(
- storage: &Arc>,
+ storage: &BucketMapHolder,
) -> Self {
AccountMapEntryMeta {
dirty: AtomicBool::new(false),
@@ -162,7 +174,7 @@ impl PreAllocatedAccountMapEntry {
pub fn new + Into>(
slot: Slot,
account_info: T,
- storage: &Arc