diff --git a/.github/env b/.github/env index f19d014b9aab1..b7c597fbcb45b 100644 --- a/.github/env +++ b/.github/env @@ -1 +1 @@ -IMAGE="docker.io/paritytech/ci-unified:bullseye-1.88.0-2025-06-27-v202509220255" +IMAGE="docker.io/paritytech/ci-unified:bullseye-1.88.0-2025-06-27-v202511141243" diff --git a/.github/scripts/process-differential-tests-report.py b/.github/scripts/process-differential-tests-report.py index daee379f0cb30..1a583be48cf31 100644 --- a/.github/scripts/process-differential-tests-report.py +++ b/.github/scripts/process-differential-tests-report.py @@ -225,7 +225,9 @@ def main() -> None: if status["status"] != "Failed": continue - failure_reason: str = status["reason"].replace("\n", " ") + failure_reason: str = ( + status["reason"].replace("\n", " ").replace("|", " ") + ) note: str = "" modes_where_this_case_succeeded: set[ModeString] = ( diff --git a/.github/workflows/check-semver.yml b/.github/workflows/check-semver.yml index c7c6f99d43a08..637145c4672bc 100644 --- a/.github/workflows/check-semver.yml +++ b/.github/workflows/check-semver.yml @@ -106,23 +106,67 @@ jobs: - name: Check semver if: ${{ github.ref != 'refs/heads/master' }} + shell: bash env: PRDOC_EXTRA_ARGS: ${{ env.PRDOC_EXTRA_ARGS }} PR: ${{ env.PR_NUMBER }} BASE_BRANCH: ${{ github.event.pull_request.base.ref }} + PR_LABELS: ${{ toJson(github.event.pull_request.labels.*.name) }} run: | if [ -z "$PR" ]; then echo "Skipping master/merge queue" exit 0 fi + # Skip semver check if PR targets stable branch and has R0-no-crate-publish-require label + if [[ "$BASE_BRANCH" =~ ^stable[0-9]{4}$ ]]; then + if echo "$PR_LABELS" | grep -q "R0-no-crate-publish-require"; then + echo "â„šī¸ Skipping the SemVer check is not recommended and should only be done in rare cases: PR targets stable branch '$BASE_BRANCH' and has 'R0-no-crate-publish-require' label." + exit 0 + fi + fi + export CARGO_TARGET_DIR=target export RUSTFLAGS='-A warnings -A missing_docs' export SKIP_WASM_BUILD=1 - if ! 
parity-publish --color always prdoc --since old --validate prdoc/pr_$PR.prdoc $PRDOC_EXTRA_ARGS -v --toolchain $TOOLCHAIN; then + prdoc_file="prdoc/pr_$PR.prdoc" + + # Always run parity-publish to check for all issues (mismatches and missing crates) + # Capture output to check for specific error types + parity_output=$(mktemp) + if ! parity-publish --color always prdoc --since old --validate prdoc/pr_$PR.prdoc $PRDOC_EXTRA_ARGS -v --toolchain $TOOLCHAIN 2>&1 | tee "$parity_output"; then + + # Check if there are missing crates (files changed but not listed in prdoc) + if grep -q "Files changed but crate not listed in PR Doc" "$parity_output"; then + rm -f "$parity_output" + cat < "$minor_patch_temp" + + has_validate_false=false + while read -r line; do + if [[ "$line" =~ bump:[[:space:]]*(minor|patch) ]]; then + read -r next_line + if [[ "$next_line" =~ validate:[[:space:]]*false ]]; then + has_validate_false=true + break + fi + fi + done < "$minor_patch_temp" + + rm -f "$minor_patch_temp" + + if [ "$has_validate_false" = true ]; then + echo "â„šī¸ Found minor/patch bumps with validate: false override. Semver validation was skipped for these crates by parity-publish." + fi + fi # Check if there are any major bumps if ! 
grep -q "bump:[[:space:]]*major" "$prdoc_file"; then @@ -155,24 +223,34 @@ jobs: temp_file=$(mktemp) grep -A1 "bump:[[:space:]]*major" "$prdoc_file" > "$temp_file" - while read -r line; do + error_found=false + while IFS= read -r line; do if [[ "$line" =~ bump:[[:space:]]*major ]]; then # This is the bump line, read the next line - read -r next_line - if [[ "$next_line" =~ validate:[[:space:]]*false ]]; then - continue # This major bump is properly validated + if IFS= read -r next_line; then + if [[ "$next_line" =~ validate:[[:space:]]*false ]]; then + continue # This major bump is properly validated + else + error_found=true + break + fi else - echo "❌ Error: Found major bump without 'validate: false'" - echo "📘 See: https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/prdoc.md#backporting-prs" - echo "🔧 Add 'validate: false' after the major bump in $prdoc_file with justification." - rm -f "$temp_file" - exit 1 + # No next line, means no validate: false + error_found=true + break fi fi done < "$temp_file" rm -f "$temp_file" + if [ "$error_found" = true ]; then + echo "❌ Error: Found major bump without 'validate: false'" + echo "📘 See: https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/prdoc.md#backporting-prs" + echo "🔧 Add 'validate: false' after the major bump in $prdoc_file with justification." + exit 1 + fi + # If we reach here, all major bumps have validate: false echo "⚠️ Backport contains major bumps, but they are all marked with validate: false." echo "✅ Semver override accepted. Please ensure justification is documented in the PR description." 
diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 318c4e7adfff4..edbd2d14aae7d 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -179,7 +179,7 @@ jobs: GITHUB_TOKEN: ${{ steps.generate_write_token.outputs.token }} run: | mkdir -p ${{ github.workspace}}/runtimes/ - gh run download ${{ github.event.inputs.build_run_id }} --dir ${{ github.workspace}}/runtimes + gh run download ${{ inputs.build_run_id }} --dir ${{ github.workspace}}/runtimes ls -la ${{ github.workspace}}/runtimes - name: Get runtime info diff --git a/.github/workflows/release-60_create-old-release-tag.yml b/.github/workflows/release-60_create-old-release-tag.yml new file mode 100644 index 0000000000000..a39e36502419f --- /dev/null +++ b/.github/workflows/release-60_create-old-release-tag.yml @@ -0,0 +1,63 @@ +name: Release - Create polkadot-vX.YY.Z tag +# This workflow creates a final release tag in the old format (e.g. polkadot-v1.20.0) for a published release. 
+ +on: + release: + types: published + +jobs: + create-old-release-tag: + runs-on: parity-default + environment: release + env: + PGP_KMS_KEY: ${{ secrets.PGP_KMS_SIGN_COMMITS_KEY }} + PGP_KMS_HASH: ${{ secrets.PGP_KMS_HASH }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + + steps: + - name: Install pgpkkms + run: | + # Install pgpkms that is used to sign commits + pip install git+https://github.com/paritytech-release/pgpkms.git@6cb1cecce1268412189b77e4b130f4fa248c4151 + + - name: Generate content write token for the release automation + id: generate_write_token + uses: actions/create-github-app-token@67018539274d69449ef7c02e8e71183d1719ab42 # v2.1.4 + with: + app-id: ${{ vars.RELEASE_AUTOMATION_APP_ID }} + private-key: ${{ secrets.RELEASE_AUTOMATION_APP_PRIVATE_KEY }} + owner: paritytech + + - name: Checkout + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + ref: ${{ github.event.release.tag_name }} + token: ${{ steps.generate_write_token.outputs.token }} + + - name: Import gpg keys + run: | + . ./.github/scripts/common/lib.sh + + import_gpg_keys + + - name: Config git + run: | + git config --global commit.gpgsign true + git config --global gpg.program /home/runner/.local/bin/pgpkms-git + git config --global user.name "ParityReleases" + git config --global user.email "release-team@parity.io" + git config --global user.signingKey "D8018FBB3F534D866A45998293C5FB5F6A367B51" + + - name: Create old release tag + env: + GH_TOKEN: ${{ steps.generate_write_token.outputs.token }} + run: | + . 
./.github/scripts/common/lib.sh + + version=$(get_polkadot_node_version_from_code) + echo "Extracted node version: $version" + + git tag -s "polkadot-v${version}" -m "Old release tag polkadot-v${version}" + git push origin "polkadot-v${version}" diff --git a/.github/workflows/release-reusable-rc-build.yml b/.github/workflows/release-reusable-rc-build.yml index aae0c700c8c3e..c480d3d83385a 100644 --- a/.github/workflows/release-reusable-rc-build.yml +++ b/.github/workflows/release-reusable-rc-build.yml @@ -42,19 +42,32 @@ jobs: # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 # This workaround sets the container image for each job using 'set-image' job output. runs-on: ubuntu-latest + env: + BINARY: ${{ inputs.binary }} outputs: IMAGE: ${{ steps.set_image.outputs.IMAGE }} + RUNNER: ${{ steps.set_image.outputs.RUNNER }} steps: - name: Checkout uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: set_image - run: cat .github/env >> $GITHUB_OUTPUT + run: | + cat .github/env >> $GITHUB_OUTPUT + RUNNER="" + if [[ "${BINARY}" =~ "polkadot-parachain" || "${BINARY}" =~ "polkadot-omni-node" ]]; then + RUNNER="ubuntu-latest-m" + echo "Using ubuntu-latest-m runner" + else + RUNNER="ubuntu-latest" + echo "Using ubuntu-latest runner" + fi + echo "RUNNER=${RUNNER}" >> $GITHUB_OUTPUT build-rc: if: ${{ inputs.target == 'x86_64-unknown-linux-gnu' }} needs: [set-image] - runs-on: ubuntu-latest-m + runs-on: ${{ needs.set-image.outputs.RUNNER }} environment: release container: image: ${{ needs.set-image.outputs.IMAGE }} diff --git a/.github/workflows/tests-evm.yml b/.github/workflows/tests-evm.yml index bcc95f3bcf44c..55ae82b229603 100644 --- a/.github/workflows/tests-evm.yml +++ b/.github/workflows/tests-evm.yml @@ -7,6 +7,7 @@ on: pull_request: types: [opened, synchronize, reopened, ready_for_review] merge_group: + workflow_dispatch: concurrency: group: ${{ github.workflow }}-${{ 
github.event.pull_request.number || github.ref }} cancel-in-progress: true @@ -50,7 +51,7 @@ jobs: uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: repository: paritytech/revive-differential-tests - ref: 347dcb4488ac188ef7bddf6e4b4c44b389f881c4 + ref: a6e4932a08b1ca231e4a02ca6e54e08a53f0e786 path: revive-differential-tests submodules: recursive - name: Installing Retester @@ -84,7 +85,8 @@ jobs: # certain cases where the report is too long to post as a Github comment. # This happens if the all of the tests are failing and therefore the # report exceeds the maximum allowed length of github comments - - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 + - name: Upload the Report to the CI + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 with: name: report-${{ matrix.platform }}.md path: report.md diff --git a/.github/workflows/tests-misc.yml b/.github/workflows/tests-misc.yml index 1b478d9d15588..8223cb2d96854 100644 --- a/.github/workflows/tests-misc.yml +++ b/.github/workflows/tests-misc.yml @@ -382,7 +382,7 @@ jobs: - name: Install resolc run: | source $HOME/.cargo/env - VERSION="0.3.0" + VERSION="0.5.0" ASSET_URL="https://github.com/paritytech/revive/releases/download/v$VERSION/resolc-universal-apple-darwin" echo "Downloading resolc v$VERSION from $ASSET_URL" curl -Lsf --show-error -o $HOME/.cargo/bin/resolc "$ASSET_URL" diff --git a/.github/zombienet-tests/zombienet_cumulus_tests.yml b/.github/zombienet-tests/zombienet_cumulus_tests.yml index 3760e284359c9..2de3c3160799d 100644 --- a/.github/zombienet-tests/zombienet_cumulus_tests.yml +++ b/.github/zombienet-tests/zombienet_cumulus_tests.yml @@ -1,18 +1,18 @@ - job-name: "zombienet-cumulus-0001-sync_blocks_from_tip_without_connected_collator" test-filter: "zombie_ci::sync_blocks::sync_blocks_from_tip_without_connected_collator" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" use-zombienet-sdk: true - 
job-name: "zombienet-cumulus-0002-pov_recovery" test-filter: "zombie_ci::pov_recovery::pov_recovery" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" use-zombienet-sdk: true - job-name: "zombienet-cumulus-0003-full_node_catching_up" test-filter: "zombie_ci::full_node_catching_up::full_node_catching_up" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" - job-name: "zombienet-cumulus-0004-runtime_upgrade" @@ -30,19 +30,19 @@ - job-name: "zombienet-cumulus-0006-rpc_collator_builds_blocks" test-filter: "zombie_ci::rpc_collator_build_blocks::rpc_collator_builds_blocks" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" use-zombienet-sdk: true - job-name: "zombienet-cumulus-0007-full_node_warp_sync" test-filter: "zombie_ci::full_node_warp_sync::full_node_warp_sync" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" use-zombienet-sdk: true - job-name: "zombienet-cumulus-0008-elastic_authoring" test-filter: "zombie_ci::elastic_scaling::slot_based_authoring::elastic_scaling_slot_based_authoring" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" use-zombienet-sdk: true @@ -50,7 +50,7 @@ # See https://github.com/paritytech/polkadot-sdk/issues/8986 - job-name: "zombienet-cumulus-0009-elastic_scaling_pov_recovery" test-filter: "zombie_ci::elastic_scaling::pov_recovery::elastic_scaling_pov_recovery" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" use-zombienet-sdk: true @@ -58,13 +58,13 @@ # See https://github.com/paritytech/polkadot-sdk/issues/8999 - job-name: "zombienet-cumulus-0010-elastic_scaling_multiple_block_per_slot" test-filter: "zombie_ci::elastic_scaling::multiple_blocks_per_slot::elastic_scaling_multiple_blocks_per_slot" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" use-zombienet-sdk: true - job-name: "zombienet-cumulus-0011-dht-bootnodes" test-filter: 
"zombie_ci::bootnodes::dht_bootnodes_test" - runner-type: "large" + runner-type: "default" cumulus-image: "polkadot-parachain-debug" use-zombienet-sdk: true @@ -76,13 +76,13 @@ - job-name: "zombienet-cumulus-0013-elastic_scaling_slot_based_rp_offset" test-filter: "zombie_ci::elastic_scaling::slot_based_rp_offset::elastic_scaling_slot_based_relay_parent_offset_test" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" use-zombienet-sdk: true - job-name: "zombienet-cumulus-0014-elastic_scaling_upgrade_to_3_cores" test-filter: "zombie_ci::elastic_scaling::upgrade_to_3_cores::elastic_scaling_upgrade_to_3_cores" - runner-type: "large" + runner-type: "default" cumulus-image: "test-parachain" use-zombienet-sdk: true needs-wasm-binary: true diff --git a/.github/zombienet-tests/zombienet_polkadot_tests.yml b/.github/zombienet-tests/zombienet_polkadot_tests.yml index bfae60cc46a71..3e3fd238b068f 100644 --- a/.github/zombienet-tests/zombienet_polkadot_tests.yml +++ b/.github/zombienet-tests/zombienet_polkadot_tests.yml @@ -2,14 +2,14 @@ - job-name: "zombienet-polkadot-functional-0001-parachains-pvf" test-definition: "0001-parachains-pvf.zndsl" local-dir: "./polkadot/zombienet_tests/functional" - runner-type: "large" + runner-type: "default" concurrency: 1 use-zombienet-sdk: false - job-name: "zombienet-polkadot-functional-0002-parachains-disputes" test-definition: "0002-parachains-disputes.zndsl" local-dir: "./polkadot/zombienet_tests/functional" - runner-type: "large" + runner-type: "default" concurrency: 1 use-zombienet-sdk: false @@ -22,31 +22,31 @@ - job-name: "zombienet-polkadot-functional-0004-parachains-disputes-garbage-candidate" test-definition: "0004-parachains-garbage-candidate.zndsl" local-dir: "./polkadot/zombienet_tests/functional" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false - job-name: "zombienet-polkadot-functional-0006-parachains-max-tranche0" test-definition: "0006-parachains-max-tranche0.zndsl" 
local-dir: "./polkadot/zombienet_tests/functional" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false - job-name: "zombienet-polkadot-functional-0007-dispute-freshly-finalized" test-definition: "0007-dispute-freshly-finalized.zndsl" local-dir: "./polkadot/zombienet_tests/functional" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false - job-name: "zombienet-polkadot-functional-0013-systematic-chunk-recovery" test-definition: "0013-systematic-chunk-recovery.zndsl" local-dir: "./polkadot/zombienet_tests/functional" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false - job-name: "zombienet-polkadot-functional-0014-chunk-fetching-network-compatibility" test-definition: "0014-chunk-fetching-network-compatibility.zndsl" local-dir: "./polkadot/zombienet_tests/functional" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false additional-setup: | BIN_DIR="$(pwd)/bin_old" @@ -73,7 +73,7 @@ - job-name: "zombienet-polkadot-functional-0015-coretime-shared-core" test-definition: "0015-coretime-shared-core.zndsl" local-dir: "./polkadot/zombienet_tests/functional" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false additional-setup: | cp --remove-destination ./polkadot/zombienet_tests/assign-core.js ./polkadot/zombienet_tests/functional @@ -81,7 +81,7 @@ - job-name: "zombienet-polkadot-functional-0019-coretime-collation-fetching-fairness" test-definition: "0019-coretime-collation-fetching-fairness.zndsl" local-dir: "./polkadot/zombienet_tests/functional" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false additional-setup: | cp --remove-destination ./polkadot/zombienet_tests/assign-core.js ./polkadot/zombienet_tests/functional @@ -109,13 +109,13 @@ - job-name: "zombienet-polkadot-smoke-0004-coretime-smoke-test" test-definition: "0004-coretime-smoke-test.zndsl" local-dir: "./polkadot/zombienet_tests/smoke" - runner-type: "large" + runner-type: "default" 
use-zombienet-sdk: false - job-name: "zombienet-polkadot-smoke-0005-precompile-pvf-smoke" test-definition: "0005-precompile-pvf-smoke.zndsl" local-dir: "./polkadot/zombienet_tests/smoke" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false # Misc tests using traditional zombienet @@ -123,7 +123,7 @@ - job-name: "zombienet-polkadot-misc-0001-parachains-paritydb" test-definition: "0001-paritydb.zndsl" local-dir: "./polkadot/zombienet_tests/misc" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false # TODO: needs to resolve how to pass the GH_TOKEN to pods @@ -140,7 +140,7 @@ - job-name: "zombienet-polkadot-malus-0001-dispute-valid" test-definition: "0001-dispute-valid-block.zndsl" local-dir: "./polkadot/node/malus/integrationtests" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: false # SDK tests using zombienet-sdk @@ -153,43 +153,43 @@ - job-name: "zombienet-polkadot-elastic-scaling-slot-based-3cores" test-filter: "elastic_scaling::slot_based_3cores::slot_based_3cores_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true cumulus-image: "test-parachain" # TODO: Disabled, fails very often with zombienet native provider - job-name: "zombienet-polkadot-elastic-scaling-slot-based-12cores" test-filter: "elastic_scaling::slot_based_12cores::slot_based_12cores_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true cumulus-image: "test-parachain" - job-name: "zombienet-polkadot-elastic-scaling-doesnt-break-parachains" test-filter: "elastic_scaling::doesnt_break_parachains::doesnt_break_parachains_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true - job-name: "zombienet-polkadot-elastic-scaling-basic-3cores" test-filter: "elastic_scaling::basic_3cores::basic_3cores_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true cumulus-image: "colander" - job-name: "zombienet-polkadot-functional-sync-backing" test-filter: 
"functional::sync_backing::sync_backing_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true cumulus-image: "test-parachain" - job-name: "zombienet-polkadot-functional-async-backing-6-seconds-rate" test-filter: "functional::async_backing_6_seconds_rate::async_backing_6_seconds_rate_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true # TODO: Disabled, occasionally (1 on ~50-100 runs) fails - job-name: "zombienet-polkadot-functional-duplicate-collations" test-filter: "functional::duplicate_collations::duplicate_collations_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true # TODO: Disabled, occasionally (2 on ~50-70 runs) fails @@ -201,19 +201,19 @@ # TODO: Disabled, occasionally (1 on ~50-100 runs) fails - job-name: "zombienet-polkadot-functional-spam-statement-distribution-requests" test-filter: "functional::spam_statement_distribution_requests::spam_statement_distribution_requests_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true cumulus-image: "colander" - job-name: "zombienet-polkadot-approval-voting-coalescing" test-filter: "functional::approval_voting_coalescing::approval_voting_coalescing_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true cumulus-image: "colander" - job-name: "zombienet-polkadot-approved-peer-mixed-validators" test-filter: "functional::approved_peer_mixed_validators::approved_peer_mixed_validators_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true cumulus-image: "colander" additional-setup: | @@ -240,15 +240,15 @@ - job-name: "zombienet-polkadot-functional-validator-disabling" test-filter: "functional::validator_disabling::validator_disabling_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true - job-name: "zombienet-polkadot-dispute-old-finalized" test-filter: "functional::dispute_old_finalized::dispute_old_finalized" - runner-type: "large" + runner-type: 
"default" use-zombienet-sdk: true - job-name: "zombienet-polkadot-shared-core-idle-parachain" test-filter: "functional::shared_core_idle_parachain::shared_core_idle_parachain_test" - runner-type: "large" + runner-type: "default" use-zombienet-sdk: true diff --git a/Cargo.lock b/Cargo.lock index a63438a07b008..1d88814ffcc6d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4500,6 +4500,7 @@ dependencies = [ "sp-trie", "sp-version", "substrate-prometheus-endpoint", + "tokio", "tracing", ] @@ -22966,6 +22967,7 @@ dependencies = [ "sp-api", "sp-consensus", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-metadata-ir", "sp-runtime", "sp-state-machine", diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 7e9a6121be33f..8dd695b7bd233 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -62,6 +62,7 @@ use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use sp_timestamp::Timestamp; use std::{path::PathBuf, sync::Arc, time::Duration}; /// Parameters for [`run`]. @@ -105,6 +106,50 @@ pub struct Params { pub max_pov_percentage: Option, } +/// Get the current parachain slot from a given block hash. +/// +/// Returns the parachain slot, relay chain slot, and timestamp. 
+fn get_parachain_slot( + para_client: &Client, + block_hash: Block::Hash, + relay_parent_header: &polkadot_primitives::Header, + relay_chain_slot_duration: Duration, +) -> Option<(Slot, Slot, Timestamp)> +where + Block: BlockT, + Client: ProvideRuntimeApi, + Client::Api: AuraApi, + P: Codec, +{ + let slot_duration = + match sc_consensus_aura::standalone::slot_duration_at(para_client, block_hash) { + Ok(sd) => sd, + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to acquire parachain slot duration"); + return None + }, + }; + + tracing::debug!(target: crate::LOG_TARGET, ?slot_duration, ?block_hash, "Parachain slot duration acquired"); + + let (relay_slot, timestamp) = + consensus_common::relay_slot_and_timestamp(relay_parent_header, relay_chain_slot_duration)?; + + let slot_now = Slot::from_timestamp(timestamp, slot_duration); + + tracing::debug!( + target: crate::LOG_TARGET, + ?relay_slot, + para_slot = ?slot_now, + ?timestamp, + ?slot_duration, + ?relay_chain_slot_duration, + "Adjusted relay-chain slot to parachain slot" + ); + + Some((slot_now, relay_slot, timestamp)) +} + /// Run async-backing-friendly Aura. 
pub fn run( params: Params, @@ -223,12 +268,10 @@ where collator_util::Collator::::new(params) }; - let mut connection_helper: BackingGroupConnectionHelper = - BackingGroupConnectionHelper::new( - params.para_client.clone(), - params.keystore.clone(), - params.overseer_handle.clone(), - ); + let mut connection_helper = BackingGroupConnectionHelper::new( + params.keystore.clone(), + params.overseer_handle.clone(), + ); while let Some(relay_parent_header) = import_notifications.next().await { let relay_parent = relay_parent_header.hash(); @@ -280,42 +323,21 @@ where let para_client = &*params.para_client; let keystore = &params.keystore; let can_build_upon = |block_hash| { - let slot_duration = match sc_consensus_aura::standalone::slot_duration_at( - &*params.para_client, + let (slot_now, relay_slot, timestamp) = get_parachain_slot::<_, _, P::Public>( + para_client, block_hash, - ) { - Ok(sd) => sd, - Err(err) => { - tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to acquire parachain slot duration"); - return None - }, - }; - tracing::debug!(target: crate::LOG_TARGET, ?slot_duration, ?block_hash, "Parachain slot duration acquired"); - let (relay_slot, timestamp) = consensus_common::relay_slot_and_timestamp( &relay_parent_header, params.relay_chain_slot_duration, )?; - let slot_now = Slot::from_timestamp(timestamp, slot_duration); - tracing::debug!( - target: crate::LOG_TARGET, - ?relay_slot, - para_slot = ?slot_now, - ?timestamp, - ?slot_duration, - relay_chain_slot_duration = ?params.relay_chain_slot_duration, - "Adjusted relay-chain slot to parachain slot" - ); - Some(( + + Some(super::can_build_upon::<_, _, P>( slot_now, - super::can_build_upon::<_, _, P>( - slot_now, - relay_slot, - timestamp, - block_hash, - included_block.hash(), - para_client, - &keystore, - ), + relay_slot, + timestamp, + block_hash, + included_block.hash(), + para_client, + &keystore, )) }; @@ -330,15 +352,25 @@ where continue } + // Trigger pre-connect to backing groups if necessary. 
+ if let (Some((slot_now, _relay_slot, _timestamp)), Ok(authorities)) = ( + get_parachain_slot::<_, _, P::Public>( + para_client, + parent_hash, + &relay_parent_header, + params.relay_chain_slot_duration, + ), + para_client.runtime_api().authorities(parent_hash), + ) { + connection_helper.update::

(slot_now, &authorities).await; + } + // This needs to change to support elastic scaling, but for continuously // scheduled chains this ensures that the backlog will grow steadily. for n_built in 0..2 { let slot_claim = match can_build_upon(parent_hash) { - Some((current_slot, fut)) => match fut.await { - None => { - connection_helper.update::(current_slot, parent_hash).await; - break - }, + Some(fut) => match fut.await { + None => break, Some(c) => c, }, None => break, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 1a945236b392c..d938dca69282f 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -58,20 +58,15 @@ const PARENT_SEARCH_DEPTH: usize = 40; // Helper to pre-connect to the backing group we got assigned to and keep the connection // open until backing group changes or own slot ends. -struct BackingGroupConnectionHelper { - client: std::sync::Arc, +struct BackingGroupConnectionHelper { keystore: sp_keystore::KeystorePtr, overseer_handle: OverseerHandle, our_slot: Option, } -impl BackingGroupConnectionHelper { - pub fn new( - client: std::sync::Arc, - keystore: sp_keystore::KeystorePtr, - overseer_handle: OverseerHandle, - ) -> Self { - Self { client, keystore, overseer_handle, our_slot: None } +impl BackingGroupConnectionHelper { + pub fn new(keystore: sp_keystore::KeystorePtr, overseer_handle: OverseerHandle) -> Self { + Self { keystore, overseer_handle, our_slot: None } } async fn send_subsystem_message(&mut self, message: CollatorProtocolMessage) { @@ -79,12 +74,8 @@ impl BackingGroupConnectionHelper { } /// Update the current slot and initiate connections to backing groups if needed. - pub async fn update(&mut self, current_slot: Slot, best_block: Block::Hash) + pub async fn update

(&mut self, current_slot: Slot, authorities: &[P::Public]) where - Block: sp_runtime::traits::Block, - Client: - sc_client_api::HeaderBackend + Send + Sync + ProvideRuntimeApi + 'static, - Client::Api: AuraApi, P: sp_core::Pair + Send + Sync, P::Public: Codec, { @@ -94,21 +85,21 @@ impl BackingGroupConnectionHelper { return } - let Some(authorities) = self.client.runtime_api().authorities(best_block).ok() else { - return - }; - let next_slot = current_slot + 1; let next_slot_is_ours = - aura_internal::claim_slot::

(next_slot, &authorities, &self.keystore) + aura_internal::claim_slot::

(next_slot, authorities, &self.keystore) .await .is_some(); if next_slot_is_ours { - // Next slot is ours, send connect message. - tracing::debug!(target: crate::LOG_TARGET, "Our slot {} is next, connecting to backing groups", next_slot); - self.send_subsystem_message(CollatorProtocolMessage::ConnectToBackingGroups) - .await; + // Only send message if we were not connected. This avoids sending duplicate messages + // when running with a single collator. + if self.our_slot.is_none() { + // Next slot is ours, send connect message. + tracing::debug!(target: crate::LOG_TARGET, "Our slot {} is next, connecting to backing groups", next_slot); + self.send_subsystem_message(CollatorProtocolMessage::ConnectToBackingGroups) + .await; + } self.our_slot = Some(next_slot); } else if self.our_slot.take().is_some() { // Next slot is not ours, send disconnect only if we had a slot before. @@ -465,15 +456,18 @@ mod tests { #[tokio::test] async fn preconnect_when_next_slot_is_ours() { - let (client, keystore) = set_up_components(6); + let (client, keystore) = set_up_components(1); let genesis_hash = client.chain_info().genesis_hash; let (overseer_handle, messages_recorder) = create_overseer_handle(); - let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + let mut helper = BackingGroupConnectionHelper::new(keystore, overseer_handle); - // Update with slot 0, next slot (1) should be ours + // Fetch authorities for the update call + let authorities = client.runtime_api().authorities(genesis_hash).unwrap(); + + // Update with slot 5, next slot (6) should be ours helper - .update::(Slot::from(0), genesis_hash) + .update::(Slot::from(5), &authorities) .await; // Give time for message to be processed @@ -482,20 +476,23 @@ mod tests { let messages = messages_recorder.lock().unwrap(); assert_eq!(messages.len(), 1); assert!(matches!(messages[0], CollatorProtocolMessage::ConnectToBackingGroups)); - assert_eq!(helper.our_slot, Some(Slot::from(1))); + 
assert_eq!(helper.our_slot, Some(Slot::from(6))); } #[tokio::test] async fn preconnect_no_duplicate_connect_message() { - let (client, keystore) = set_up_components(6); + let (client, keystore) = set_up_components(1); let genesis_hash = client.chain_info().genesis_hash; let (overseer_handle, messages_recorder) = create_overseer_handle(); - let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + let mut helper = BackingGroupConnectionHelper::new(keystore, overseer_handle); - // Update with slot 0, next slot (1) is ours + // Fetch authorities for the update calls + let authorities = client.runtime_api().authorities(genesis_hash).unwrap(); + + // Update with slot 5, next slot (6) is ours helper - .update::(Slot::from(0), genesis_hash) + .update::(Slot::from(5), &authorities) .await; // Give time for message to be processed @@ -503,16 +500,16 @@ mod tests { assert_eq!(messages_recorder.lock().unwrap().len(), 1); messages_recorder.lock().unwrap().clear(); - // Update with slot 0 again - should not send another message + // Update with slot 5 again - should not send another message helper - .update::(Slot::from(0), genesis_hash) + .update::(Slot::from(5), &authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; assert_eq!(messages_recorder.lock().unwrap().len(), 0); // Update with slot 1 (our slot) - should not send another message helper - .update::(Slot::from(1), genesis_hash) + .update::(Slot::from(6), &authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; assert_eq!(messages_recorder.lock().unwrap().len(), 0); @@ -524,14 +521,17 @@ mod tests { let genesis_hash = client.chain_info().genesis_hash; let (overseer_handle, messages_recorder) = create_overseer_handle(); - let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + let mut helper = BackingGroupConnectionHelper::new(keystore, overseer_handle); + + // Fetch authorities for the update 
calls + let authorities = client.runtime_api().authorities(genesis_hash).unwrap(); // Slot 0 -> Alice, Slot 1 -> Bob, Slot 2 -> Charlie, Slot 3 -> Dave, Slot 4 -> Eve, // Slot 5 -> Ferdie, Slot 6 -> Alice // Update with slot 5, next slot (6) is ours -> should connect helper - .update::(Slot::from(5), genesis_hash) + .update::(Slot::from(5), &authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; assert_eq!(helper.our_slot, Some(Slot::from(6))); @@ -539,7 +539,7 @@ mod tests { // Update with slot 8, next slot (9) is Charlie's -> should disconnect helper - .update::(Slot::from(8), genesis_hash) + .update::(Slot::from(8), &authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; @@ -555,7 +555,7 @@ mod tests { // Update again with slot 8, next slot (9) is Charlie's -> should not send another // disconnect message helper - .update::(Slot::from(8), genesis_hash) + .update::(Slot::from(8), &authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; @@ -570,7 +570,10 @@ mod tests { let genesis_hash = client.chain_info().genesis_hash; let (overseer_handle, messages_recorder) = create_overseer_handle(); - let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + let mut helper = BackingGroupConnectionHelper::new(keystore, overseer_handle); + + // Fetch authorities for the update call + let authorities = client.runtime_api().authorities(genesis_hash).unwrap(); // Slot 0 -> Alice, Slot 1 -> Bob, Slot 2 -> Charlie, Slot 3 -> Dave, Slot 4 -> Eve, // Slot 5 -> Ferdie @@ -578,7 +581,7 @@ mod tests { // Update with slot 1 (Bob's slot), next slot (2) is Charlie's // Since we never connected before (our_slot is None), we should not send disconnect helper - .update::(Slot::from(1), genesis_hash) + .update::(Slot::from(1), &authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; @@ -593,14 +596,17 @@ mod tests { let genesis_hash = 
client.chain_info().genesis_hash; let (overseer_handle, messages_recorder) = create_overseer_handle(); - let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + let mut helper = BackingGroupConnectionHelper::new(keystore, overseer_handle); + + // Fetch authorities for the update calls + let authorities = client.runtime_api().authorities(genesis_hash).unwrap(); // Slot 0 -> Alice, Slot 1 -> Bob, Slot 2 -> Charlie, Slot 3 -> Dave, Slot 4 -> Eve, // Slot 5 -> Ferdie, Slot 6 -> Alice, Slot 7 -> Bob, ... // Cycle 1: Connect at slot 5, next slot (6) is ours helper - .update::(Slot::from(5), genesis_hash) + .update::(Slot::from(5), &authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; { @@ -613,7 +619,7 @@ mod tests { // Cycle 1: Disconnect at slot 7, next slot (8) is Charlie's helper - .update::(Slot::from(7), genesis_hash) + .update::(Slot::from(7), &authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; { @@ -626,10 +632,7 @@ mod tests { // Cycle 2: Connect again at slot 11, next slot (12) is ours helper - .update::( - Slot::from(11), - genesis_hash, - ) + .update::(Slot::from(11), &authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; { @@ -641,20 +644,20 @@ mod tests { } #[tokio::test] - async fn preconnect_handles_runtime_api_error() { + async fn preconnect_handles_empty_authorities() { let keystore = Arc::new(sp_keystore::testing::MemoryKeystore::new()) as Arc<_>; - let client = Arc::new(TestClientBuilder::new().build()); let (overseer_handle, messages_recorder) = create_overseer_handle(); - let mut helper = BackingGroupConnectionHelper::new(client, keystore, overseer_handle); + let mut helper = BackingGroupConnectionHelper::new(keystore, overseer_handle); - let invalid_hash = Hash::default(); + // Pass empty authorities list + let authorities = vec![]; helper - .update::(Slot::from(0), invalid_hash) + .update::(Slot::from(0), 
&authorities) .await; tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; - // Should not send any message if runtime API fails + // Should not send any message if authorities list is empty assert_eq!(messages_recorder.lock().unwrap().len(), 0); } } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index affce0a29fcae..c9e191e601638 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -184,16 +184,14 @@ where }; let mut relay_chain_data_cache = RelayChainDataCache::new(relay_client.clone(), para_id); - - let mut maybe_connection_helper = relay_client - .overseer_handle() - .ok() - .map(|h| BackingGroupConnectionHelper::new(para_client.clone(), keystore.clone(), h.clone())) - .or_else(|| { - tracing::warn!(target: LOG_TARGET, - "Relay chain interface does not provide overseer handle. Backing group pre-connect is disabled."); - None - }); + let mut connection_helper = BackingGroupConnectionHelper::new( + keystore.clone(), + relay_client + .overseer_handle() + // Should never fail. If it fails, then providing collations to relay chain + // doesn't work either. So it is fine to panic here. + .expect("Relay chain interface must provide overseer handle."), + ); loop { // We wait here until the next slot arrives. @@ -216,7 +214,7 @@ where continue; }; - let Ok(rp_data) = offset_relay_parent_find_descendants( + let Ok(Some(rp_data)) = offset_relay_parent_find_descendants( &mut relay_chain_data_cache, relay_best_hash, relay_parent_offset, @@ -310,6 +308,10 @@ where let included_header_hash = included_header.hash(); + if let Ok(authorities) = para_client.runtime_api().authorities(parent_hash) { + connection_helper.update::

(para_slot.slot, &authorities).await; + } + let slot_claim = match crate::collators::can_build_upon::<_, _, P>( para_slot.slot, relay_slot, @@ -334,9 +336,6 @@ where slot = ?para_slot.slot, "Not building block." ); - if let Some(ref mut connection_helper) = maybe_connection_helper { - connection_helper.update::(para_slot.slot, parent_hash).await; - } continue }, }; @@ -486,7 +485,7 @@ pub(crate) async fn offset_relay_parent_find_descendants( relay_chain_data_cache: &mut RelayChainDataCache, relay_best_block: RelayHash, relay_parent_offset: u32, -) -> Result +) -> Result, ()> where RelayClient: RelayChainInterface + Clone + 'static, { @@ -500,7 +499,12 @@ where }; if relay_parent_offset == 0 { - return Ok(RelayParentData::new(relay_header)); + return Ok(Some(RelayParentData::new(relay_header))); + } + + if sc_consensus_babe::contains_epoch_change::(&relay_header) { + tracing::debug!(target: LOG_TARGET, ?relay_best_block, relay_best_block_number = relay_header.number(), "Relay parent is in previous session."); + return Ok(None); } let mut required_ancestors: VecDeque = Default::default(); @@ -511,6 +515,10 @@ where .await? .relay_parent_header .clone(); + if sc_consensus_babe::contains_epoch_change::(&next_header) { + tracing::debug!(target: LOG_TARGET, ?relay_best_block, ancestor = %next_header.hash(), ancestor_block_number = next_header.number(), "Ancestor of best block is in previous session."); + return Ok(None); + } required_ancestors.push_front(next_header.clone()); relay_header = next_header; } @@ -529,7 +537,7 @@ where "Relay parent descendants." ); - Ok(RelayParentData::new_with_descendants(relay_parent, required_ancestors.into())) + Ok(Some(RelayParentData::new_with_descendants(relay_parent, required_ancestors.into()))) } /// Return value of [`determine_core`]. 
diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs index a26ac2c581e92..e0ba35e558afe 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs @@ -20,6 +20,7 @@ use super::{ relay_chain_data_cache::{RelayChainData, RelayChainDataCache}, }; use async_trait::async_trait; +use codec::Encode; use cumulus_primitives_core::{ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem}; use cumulus_relay_chain_interface::*; use futures::Stream; @@ -28,6 +29,11 @@ use polkadot_primitives::{ CandidateEvent, CommittedCandidateReceiptV2, CoreIndex, Hash as RelayHash, Header as RelayHeader, Id as ParaId, }; +use rstest::rstest; +use sc_consensus_babe::{ + AuthorityId, ConsensusLog as BabeConsensusLog, NextEpochDescriptor, BABE_ENGINE_ID, +}; +use sp_core::sr25519; use sp_runtime::{generic::BlockId, testing::Header as TestHeader, traits::Header}; use sp_version::RuntimeVersion; use std::{ @@ -45,7 +51,7 @@ async fn offset_test_zero_offset() { let result = offset_relay_parent_find_descendants(&mut cache, best_hash, 0).await; assert!(result.is_ok()); - let data = result.unwrap(); + let data = result.unwrap().unwrap(); assert_eq!(data.descendants_len(), 0); assert_eq!(data.relay_parent().hash(), best_hash); assert!(data.into_inherent_descendant_list().is_empty()); @@ -61,7 +67,7 @@ async fn offset_test_two_offset() { let result = offset_relay_parent_find_descendants(&mut cache, best_hash, 2).await; assert!(result.is_ok()); - let data = result.unwrap(); + let data = result.unwrap().unwrap(); assert_eq!(data.descendants_len(), 2); assert_eq!(*data.relay_parent().number(), 98); let descendant_list = data.into_inherent_descendant_list(); @@ -80,7 +86,7 @@ async fn offset_test_five_offset() { let result = offset_relay_parent_find_descendants(&mut cache, best_hash, 5).await; assert!(result.is_ok()); - let 
data = result.unwrap(); + let data = result.unwrap().unwrap(); assert_eq!(data.descendants_len(), 5); assert_eq!(*data.relay_parent().number(), 95); let descendant_list = data.into_inherent_descendant_list(); @@ -104,6 +110,33 @@ async fn offset_test_too_long() { assert!(result.is_err()); } +#[derive(PartialEq)] +enum HasEpochChange { + Yes, + No, +} + +#[rstest] +#[case::in_best( + &[HasEpochChange::No, HasEpochChange::No, HasEpochChange::Yes], +)] +#[case::in_first_ancestor( + &[HasEpochChange::No, HasEpochChange::Yes, HasEpochChange::No], +)] +#[case::in_second_ancestor( + &[HasEpochChange::Yes, HasEpochChange::No, HasEpochChange::No], +)] +#[tokio::test] +async fn offset_returns_none_when_epoch_change_encountered(#[case] flags: &[HasEpochChange]) { + let (headers, best_hash) = build_headers_with_epoch_flags(flags); + let client = TestRelayClient::new(headers); + let mut cache = RelayChainDataCache::new(client, 1.into()); + + let result = offset_relay_parent_find_descendants(&mut cache, best_hash, 3).await; + assert!(result.is_ok()); + assert!(result.unwrap().is_none()); +} + #[tokio::test] async fn determine_core_new_relay_parent() { let (headers, _best_hash) = create_header_chain(); @@ -593,6 +626,50 @@ impl RelayChainInterface for TestRelayClient { } } +/// Build a consecutive set of relay headers whose digest entries optionally carry a BABE +/// epoch-change marker, returning the underlying map and the hash of the last header. 
+fn build_headers_with_epoch_flags( + flags: &[HasEpochChange], +) -> (HashMap, RelayHash) { + let mut headers = HashMap::new(); + let mut parent_hash = RelayHash::default(); + let mut last_hash = RelayHash::default(); + + for (index, has_epoch_change) in flags.iter().enumerate() { + let digest = if *has_epoch_change == HasEpochChange::Yes { + babe_epoch_change_digest() + } else { + Default::default() + }; + + let header = RelayHeader { + parent_hash, + number: (index as u32 + 1), + state_root: Default::default(), + extrinsics_root: Default::default(), + digest, + }; + + let hash = header.hash(); + headers.insert(hash, header); + parent_hash = hash; + last_hash = hash; + } + + (headers, last_hash) +} + +/// Create a digest containing a single BABE `NextEpochData` item for use in tests. +fn babe_epoch_change_digest() -> sp_runtime::generic::Digest { + let mut digest = sp_runtime::generic::Digest::default(); + let authority_id = AuthorityId::from(sr25519::Public::from_raw([1u8; 32])); + let next_epoch = + NextEpochDescriptor { authorities: vec![(authority_id, 1u64)], randomness: [0u8; 32] }; + let log = BabeConsensusLog::NextEpochData(next_epoch); + digest.push(sp_runtime::generic::DigestItem::Consensus(BABE_ENGINE_ID, log.encode())); + digest +} + fn create_header_chain() -> (HashMap, RelayHash) { let mut headers = HashMap::new(); let mut current_parent = None; diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 1615215abfa73..c65e05c02266a 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -46,6 +46,7 @@ schnellru = { workspace = true } [dev-dependencies] futures-timer = { workspace = true } +tokio = { features = ["macros"], workspace = true } # Substrate sp-tracing = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/common/src/lib.rs b/cumulus/client/consensus/common/src/lib.rs index 324023d4b440c..6c810173af82e 100644 
--- a/cumulus/client/consensus/common/src/lib.rs +++ b/cumulus/client/consensus/common/src/lib.rs @@ -39,7 +39,7 @@ mod tests; pub use parent_search::*; pub use cumulus_relay_chain_streams::finalized_heads; -pub use parachain_consensus::run_parachain_consensus; +pub use parachain_consensus::spawn_parachain_consensus_tasks; use level_monitor::LevelMonitor; pub use level_monitor::{LevelLimit, MAX_LEAVES_PER_LEVEL_SENSIBLE_DEFAULT}; diff --git a/cumulus/client/consensus/common/src/parachain_consensus.rs b/cumulus/client/consensus/common/src/parachain_consensus.rs index 3d959aa9948d2..5ed588d4d33da 100644 --- a/cumulus/client/consensus/common/src/parachain_consensus.rs +++ b/cumulus/client/consensus/common/src/parachain_consensus.rs @@ -31,7 +31,11 @@ use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_primitives::Id as ParaId; use codec::Decode; -use futures::{channel::mpsc::Sender, pin_mut, select, FutureExt, StreamExt}; +use futures::{ + channel::mpsc::{Sender, UnboundedSender}, + pin_mut, select, FutureExt, SinkExt, Stream, StreamExt, +}; +use sp_core::traits::SpawnEssentialNamed; use std::sync::Arc; @@ -40,25 +44,13 @@ const FINALIZATION_CACHE_SIZE: u32 = 40; fn handle_new_finalized_head( parachain: &Arc

, - finalized_head: Vec, + header: Block::Header, last_seen_finalized_hashes: &mut LruMap, ) where Block: BlockT, B: Backend, P: Finalizer + UsageProvider + BlockchainEvents, { - let header = match Block::Header::decode(&mut &finalized_head[..]) { - Ok(header) => header, - Err(err) => { - tracing::debug!( - target: LOG_TARGET, - error = ?err, - "Could not decode parachain header while following finalized heads.", - ); - return - }, - }; - let hash = header.hash(); last_seen_finalized_hashes.insert(hash, ()); @@ -88,18 +80,21 @@ fn handle_new_finalized_head( } } -/// Follow the finalized head of the given parachain. +/// Streams finalized parachain heads from the relay chain. /// -/// For every finalized block of the relay chain, it will get the included parachain header -/// corresponding to `para_id` and will finalize it in the parachain. -async fn follow_finalized_head(para_id: ParaId, parachain: Arc

, relay_chain: R) -where - Block: BlockT, - P: Finalizer + UsageProvider + BlockchainEvents, - R: RelayChainInterface + Clone, - B: Backend, -{ - let finalized_heads = match finalized_heads(relay_chain, para_id).await { +/// This worker continuously monitors the relay chain for finalized blocks and extracts +/// the corresponding parachain head data for the given `para_id`. The extracted head +/// data is sent through the provided channel for consumption by the consensus system. +/// +/// This is necessary because finalization of blocks can take a long +/// time. During this blocking operation, we should not keep references to finality notifications, +/// because that prevents the corresponding blocks from getting pruned. +pub async fn finalized_head_stream_worker( + mut tx: UnboundedSender, + para_id: ParaId, + relay_chain: R, +) { + let finalized_heads = match finalized_heads(relay_chain.clone(), para_id).await { Ok(finalized_heads_stream) => finalized_heads_stream.fuse(), Err(err) => { tracing::error!(target: LOG_TARGET, error = ?err, "Unable to retrieve finalized heads stream."); @@ -107,9 +102,42 @@ where }, }; - let mut imported_blocks = parachain.import_notification_stream().fuse(); - pin_mut!(finalized_heads); + loop { + if let Some((head_data, _)) = finalized_heads.next().await { + let header = match Block::Header::decode(&mut &head_data[..]) { + Ok(header) => header, + Err(err) => { + tracing::debug!( + target: LOG_TARGET, + error = ?err, + "Could not decode parachain header while following finalized heads.", + ); + continue + }, + }; + if let Err(e) = tx.send(header).await { + tracing::error!(target: LOG_TARGET, ?e, "Error while sending finalized head."); + return; + }; + } + } +} + +/// Follow the finalized head of the given parachain. +/// +/// For every finalized block of the relay chain, it will get the included parachain header +/// corresponding to `para_id` and will finalize it in the parachain. +async fn follow_finalized_head( + parachain: Arc

, + finalized_head_stream: Box + Unpin + Send>, +) where + Block: BlockT, + P: Finalizer + UsageProvider + BlockchainEvents, + B: Backend, +{ + let mut imported_blocks = parachain.import_notification_stream().fuse(); + let mut finalized_head_stream = finalized_head_stream.fuse(); // We use this cache to finalize blocks that are imported late. // For example, a block that has been recovered via PoV-Recovery @@ -119,9 +147,9 @@ where loop { select! { - fin = finalized_heads.next() => { + fin = finalized_head_stream.next() => { match fin { - Some((finalized_head, _)) => + Some(finalized_head) => handle_new_finalized_head(¶chain, finalized_head, &mut last_seen_finalized_hashes), None => { tracing::debug!(target: LOG_TARGET, "Stopping following finalized head."); @@ -170,6 +198,52 @@ where } } +/// Spawns the essential finalization tasks for parachain consensus. +/// +/// This function creates and spawns two critical background tasks: +/// 1. A finalized head stream worker that monitors relay chain finality and extracts included +/// headers +/// 2. The main parachain consensus task that handles finalization and best block updates +pub fn spawn_parachain_consensus_tasks( + para_id: ParaId, + parachain: Arc

, + relay_chain: R, + announce_block: Arc>) + Send + Sync>, + recovery_chan_tx: Option>>, + spawn_handle: S, +) where + Block: BlockT, + P: Finalizer + + UsageProvider + + Send + + Sync + + BlockBackend + + BlockchainEvents + + 'static, + for<'a> &'a P: BlockImport, + R: RelayChainInterface + Clone + 'static, + S: SpawnEssentialNamed + 'static, + B: Backend + 'static, +{ + let (tx, rx) = futures::channel::mpsc::unbounded(); + let worker = finalized_head_stream_worker::<_, Block>(tx, para_id, relay_chain.clone()); + let consensus = run_parachain_consensus( + para_id, + parachain, + relay_chain, + announce_block, + Box::new(rx), + recovery_chan_tx, + ); + + spawn_handle.spawn_essential_blocking("cumulus-consensus", None, Box::pin(consensus)); + spawn_handle.spawn_essential_blocking( + "cumulus-consensus-finality-stream", + None, + Box::pin(worker), + ); +} + /// Run the parachain consensus. /// /// This will follow the given `relay_chain` to act as consensus for the parachain that corresponds @@ -185,6 +259,7 @@ pub async fn run_parachain_consensus( parachain: Arc

, relay_chain: R, announce_block: Arc>) + Send + Sync>, + finalized_head_stream: Box + Unpin + Send>, recovery_chan_tx: Option>>, ) where Block: BlockT, @@ -205,7 +280,7 @@ pub async fn run_parachain_consensus( announce_block, recovery_chan_tx, ); - let follow_finalized_head = follow_finalized_head(para_id, parachain, relay_chain); + let follow_finalized_head = follow_finalized_head(parachain, finalized_head_stream); select! { _ = follow_new_best.fuse() => {}, _ = follow_finalized_head.fuse() => {}, diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 34514f6457370..ff1c8ec56508b 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -17,6 +17,7 @@ use crate::*; +use crate::parachain_consensus::run_parachain_consensus; use async_trait::async_trait; use codec::Encode; use cumulus_client_pov_recovery::RecoveryKind; @@ -404,8 +405,8 @@ fn build_and_import_block(mut client: Arc, import_as_best: bool) -> Bloc ) } -#[test] -fn follow_new_best_works() { +#[tokio::test] +async fn follow_new_best_works() { sp_tracing::try_init_simple(); let client = Arc::new(TestClientBuilder::default().build()); @@ -414,8 +415,15 @@ fn follow_new_best_works() { let relay_chain = Relaychain::new(); let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone(); - let consensus = - run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None); + let (_finalized_sender, finalized_receiver) = futures::channel::mpsc::unbounded(); + let consensus = run_parachain_consensus( + 100.into(), + client.clone(), + relay_chain, + Arc::new(|_, _| {}), + Box::new(finalized_receiver), + None, + ); let work = async move { new_best_heads_sender.unbounded_send(block.header().clone()).unwrap(); @@ -427,19 +435,17 @@ fn follow_new_best_works() { } }; - block_on(async move { - futures::pin_mut!(consensus); - futures::pin_mut!(work); + 
futures::pin_mut!(consensus); + futures::pin_mut!(work); - select! { - r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), - _ = work.fuse() => {}, - } - }); + select! { + r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), + _ = work.fuse() => {}, + } } -#[test] -fn follow_new_best_with_dummy_recovery_works() { +#[tokio::test] +async fn follow_new_best_with_dummy_recovery_works() { sp_tracing::try_init_simple(); let client = Arc::new(TestClientBuilder::default().build()); @@ -449,11 +455,13 @@ fn follow_new_best_with_dummy_recovery_works() { let (recovery_chan_tx, mut recovery_chan_rx) = futures::channel::mpsc::channel(3); + let (_finalized_sender, finalized_receiver) = futures::channel::mpsc::unbounded(); let consensus = run_parachain_consensus( 100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), + Box::new(finalized_receiver), Some(recovery_chan_tx), ); @@ -493,33 +501,38 @@ fn follow_new_best_with_dummy_recovery_works() { } }; - block_on(async move { - futures::pin_mut!(consensus); - futures::pin_mut!(work); + futures::pin_mut!(consensus); + futures::pin_mut!(work); - select! { - r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), - _ = dummy_block_recovery.fuse() => {}, - _ = work.fuse() => {}, - } - }); + select! 
{ + r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), + _ = dummy_block_recovery.fuse() => {}, + _ = work.fuse() => {}, + } } -#[test] -fn follow_finalized_works() { +#[tokio::test] +async fn follow_finalized_works() { sp_tracing::try_init_simple(); let client = Arc::new(TestClientBuilder::default().build()); let block = build_and_import_block(client.clone(), false); let relay_chain = Relaychain::new(); - let finalized_sender = relay_chain.inner.lock().unwrap().finalized_heads_sender.clone(); + let _finalized_sender = relay_chain.inner.lock().unwrap().finalized_heads_sender.clone(); - let consensus = - run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None); + let (mock_finalized_sender, finalized_receiver) = futures::channel::mpsc::unbounded(); + let consensus = run_parachain_consensus( + 100.into(), + client.clone(), + relay_chain, + Arc::new(|_, _| {}), + Box::new(finalized_receiver), + None, + ); let work = async move { - finalized_sender.unbounded_send(block.header().clone()).unwrap(); + mock_finalized_sender.unbounded_send(block.header().clone()).unwrap(); loop { Delay::new(Duration::from_millis(100)).await; if block.hash() == client.usage_info().chain.finalized_hash { @@ -528,19 +541,17 @@ fn follow_finalized_works() { } }; - block_on(async move { - futures::pin_mut!(consensus); - futures::pin_mut!(work); + futures::pin_mut!(consensus); + futures::pin_mut!(work); - select! { - r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), - _ = work.fuse() => {}, - } - }); + select! 
{ + r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), + _ = work.fuse() => {}, + } } -#[test] -fn follow_finalized_does_not_stop_on_unknown_block() { +#[tokio::test] +async fn follow_finalized_does_not_stop_on_unknown_block() { sp_tracing::try_init_simple(); let client = Arc::new(TestClientBuilder::default().build()); @@ -554,19 +565,26 @@ fn follow_finalized_does_not_stop_on_unknown_block() { }; let relay_chain = Relaychain::new(); - let finalized_sender = relay_chain.inner.lock().unwrap().finalized_heads_sender.clone(); + let _finalized_sender = relay_chain.inner.lock().unwrap().finalized_heads_sender.clone(); - let consensus = - run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None); + let (mock_finalized_sender, finalized_receiver) = futures::channel::mpsc::unbounded(); + let consensus = run_parachain_consensus( + 100.into(), + client.clone(), + relay_chain, + Arc::new(|_, _| {}), + Box::new(finalized_receiver), + None, + ); let work = async move { for _ in 0..3usize { - finalized_sender.unbounded_send(unknown_block.header().clone()).unwrap(); + mock_finalized_sender.unbounded_send(unknown_block.header().clone()).unwrap(); Delay::new(Duration::from_millis(100)).await; } - finalized_sender.unbounded_send(block.header().clone()).unwrap(); + mock_finalized_sender.unbounded_send(block.header().clone()).unwrap(); loop { Delay::new(Duration::from_millis(100)).await; if block.hash() == client.usage_info().chain.finalized_hash { @@ -575,22 +593,20 @@ fn follow_finalized_does_not_stop_on_unknown_block() { } }; - block_on(async move { - futures::pin_mut!(consensus); - futures::pin_mut!(work); + futures::pin_mut!(consensus); + futures::pin_mut!(work); - select! { - r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), - _ = work.fuse() => {}, - } - }); + select! 
{ + r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), + _ = work.fuse() => {}, + } } // It can happen that we first import a relay chain block, while not yet having the parachain // block imported that would be set to the best block. We need to make sure to import this // block as new best block in the moment it is imported. -#[test] -fn follow_new_best_sets_best_after_it_is_imported() { +#[tokio::test] +async fn follow_new_best_sets_best_after_it_is_imported() { sp_tracing::try_init_simple(); let client = Arc::new(TestClientBuilder::default().build()); @@ -606,8 +622,15 @@ fn follow_new_best_sets_best_after_it_is_imported() { let relay_chain = Relaychain::new(); let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone(); - let consensus = - run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None); + let (_finalized_sender, finalized_receiver) = futures::channel::mpsc::unbounded(); + let consensus = run_parachain_consensus( + 100.into(), + client.clone(), + relay_chain, + Arc::new(|_, _| {}), + Box::new(finalized_receiver), + None, + ); let work = async move { new_best_heads_sender.unbounded_send(block.header().clone()).unwrap(); @@ -645,15 +668,13 @@ fn follow_new_best_sets_best_after_it_is_imported() { } }; - block_on(async move { - futures::pin_mut!(consensus); - futures::pin_mut!(work); + futures::pin_mut!(consensus); + futures::pin_mut!(work); - select! { - r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), - _ = work.fuse() => {}, - } - }); + select! { + r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), + _ = work.fuse() => {}, + } } /// When we import a new best relay chain block, we extract the best parachain block from it and set @@ -663,8 +684,8 @@ fn follow_new_best_sets_best_after_it_is_imported() { /// could import block 100 as best and then import a relay chain block that says that block 99 is /// the best parachain block. 
This should not happen, we should never set the best block to a lower /// block number. -#[test] -fn do_not_set_best_block_to_older_block() { +#[tokio::test] +async fn do_not_set_best_block_to_older_block() { const NUM_BLOCKS: usize = 4; sp_tracing::try_init_simple(); @@ -682,8 +703,15 @@ fn do_not_set_best_block_to_older_block() { let relay_chain = Relaychain::new(); let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone(); - let consensus = - run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}), None); + let (_finalized_sender, finalized_receiver) = futures::channel::mpsc::unbounded(); + let consensus = run_parachain_consensus( + 100.into(), + client.clone(), + relay_chain, + Arc::new(|_, _| {}), + Box::new(finalized_receiver), + None, + ); let work = async move { new_best_heads_sender @@ -693,15 +721,13 @@ fn do_not_set_best_block_to_older_block() { Delay::new(Duration::from_millis(300)).await; }; - block_on(async move { - futures::pin_mut!(consensus); - futures::pin_mut!(work); + futures::pin_mut!(consensus); + futures::pin_mut!(work); - select! { - r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), - _ = work.fuse() => {}, - } - }); + select! { + r = consensus.fuse() => panic!("Consensus should not end: {:?}", r), + _ = work.fuse() => {}, + } // Build and import a new best block. build_and_import_block(client, true); diff --git a/cumulus/client/parachain-inherent/src/lib.rs b/cumulus/client/parachain-inherent/src/lib.rs index 87d716ff6a673..5e994cd472f70 100644 --- a/cumulus/client/parachain-inherent/src/lib.rs +++ b/cumulus/client/parachain-inherent/src/lib.rs @@ -184,12 +184,10 @@ impl ParachainInherentDataProvider { // Only include next epoch authorities when the descendants include an epoch digest. // Skip the first entry because this is the relay parent itself. 
- let include_next_authorities = relay_parent_descendants.iter().skip(1).any(|header| { - sc_consensus_babe::find_next_epoch_digest::(header) - .ok() - .flatten() - .is_some() - }); + let include_next_authorities = relay_parent_descendants + .iter() + .skip(1) + .any(sc_consensus_babe::contains_epoch_change::); let relay_chain_state = collect_relay_storage_proof( relay_chain_interface, para_id, diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 068778822ccf2..de88f8e21e706 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -142,18 +142,15 @@ where { let (recovery_chan_tx, recovery_chan_rx) = mpsc::channel(RECOVERY_CHAN_SIZE); - let consensus = cumulus_client_consensus_common::run_parachain_consensus( + cumulus_client_consensus_common::spawn_parachain_consensus_tasks( para_id, client.clone(), relay_chain_interface.clone(), announce_block.clone(), Some(recovery_chan_tx), + task_manager.spawn_essential_handle(), ); - task_manager - .spawn_essential_handle() - .spawn_blocking("cumulus-consensus", None, consensus); - let da_recovery_profile = match da_recovery_profile { DARecoveryProfile::Collator => { // We want that collators wait at maximum the relay chain slot duration before starting diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index 686e254bf4319..4b69d674e9eca 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -1553,6 +1553,7 @@ fn receive_hrmp_many() { } #[test] +#[cfg(not(feature = "runtime-benchmarks"))] fn upgrade_version_checks_should_work() { use codec::Encode; use sp_version::RuntimeVersion; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 747842c2254e9..bde01f335b19c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ 
b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -2069,9 +2069,9 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index e00258f7a4f2b..6ee07f6fe0a8a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1755,9 +1755,9 @@ pallet_revive::impl_runtime_apis_plus_revive_traits!( } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 68cfd2b6c21ac..7dbcbb0975eb4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -1577,9 +1577,9 @@ impl_runtime_apis! 
{ } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index cc8449131c343..6e55033bc20ba 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -1439,9 +1439,9 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index ac168d694e77f..85521a8db084e 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -1347,9 +1347,9 @@ impl_runtime_apis! 
{ } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index aea5eff47a9b9..8e59eda372c01 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -1187,9 +1187,9 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 17b1023cc07d8..671ccb8c9c0d5 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -1204,9 +1204,9 @@ impl_runtime_apis! 
{ } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 39c0c57820b69..ac563cb4912dd 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -510,9 +510,9 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 5b3f7ba903c71..dec1be2786bd2 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -1118,9 +1118,9 @@ impl_runtime_apis! 
{ } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index bee243cca5982..ed613f38589b8 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -1136,9 +1136,9 @@ impl_runtime_apis! { } } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_num_cores: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } diff --git a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs index 085f7ff3e5027..db4c88033da9b 100644 --- a/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs +++ b/cumulus/polkadot-omni-node/lib/src/fake_runtime_api/utils.rs @@ -242,8 +242,8 @@ macro_rules! 
impl_node_runtime_apis { } } - impl cumulus_primitives_core::SlotSchedule<$block> for $runtime { - fn next_slot_schedule(_: u32) -> cumulus_primitives_core::NextSlotSchedule { + impl cumulus_primitives_core::TargetBlockRate<$block> for $runtime { + fn target_block_rate() -> u32 { unimplemented!() } } diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs b/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs index a2a897c378cdc..373afac5b1010 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs @@ -19,4 +19,4 @@ pub mod aura; /// The current node version for cumulus official binaries, which takes the basic /// SemVer form `..`. It should correspond to the latest /// `polkadot` version of a stable release. -pub const NODE_VERSION: &'static str = "1.20.1"; +pub const NODE_VERSION: &'static str = "1.20.2"; diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index e06a92dcef8bc..9fc245aab43f7 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -22,7 +22,6 @@ extern crate alloc; use alloc::vec::Vec; use codec::{Compact, Decode, DecodeAll, DecodeWithMemTracking, Encode, MaxEncodedLen}; -use core::time::Duration; use polkadot_parachain_primitives::primitives::HeadData; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; @@ -467,46 +466,6 @@ pub struct CollationInfo { pub head_data: HeadData, } -/// The schedule for the next relay chain slot. -/// -/// Returns the maximum number of parachain blocks to produce and the block time per block to use. -#[derive(Clone, Debug, codec::Decode, codec::Encode, PartialEq, TypeInfo)] -pub struct NextSlotSchedule { - /// The maximum number of blocks to produce in the relay chain slot. - /// - /// The node is free to produce less blocks. - pub number_of_blocks: u32, - /// The target block time in wall clock time for each block. 
- /// - /// The maximum should be [`REF_TIME_PER_CORE_IN_SECS`] or otherwise blocks may fail to - /// validate on the relay chain. - pub block_time: Duration, -} - -impl NextSlotSchedule { - /// Creates a schedule that produces one block, occupying an entire core. - pub fn one_block_using_one_core() -> Self { - Self { number_of_blocks: 1, block_time: Duration::from_secs(REF_TIME_PER_CORE_IN_SECS) } - } - - /// A schedule that maps `x` blocks onto `y` cores. - pub fn x_blocks_using_y_cores(blocks: u32, cores: u32) -> Self { - let ref_time_per_core = Duration::from_secs(REF_TIME_PER_CORE_IN_SECS); - - if blocks == 0 || cores == 0 { - return Self { number_of_blocks: 0, block_time: Duration::from_secs(0) } - } - - // In wall clock time we can not go above `6s` (relay chain slot duration), so we need to - // cap there. - let block_time = (ref_time_per_core * cores).min(Duration::from_secs(6)) / blocks; - // One block can at max occupy one core. - let block_time = block_time.min(ref_time_per_core); - - Self { block_time, number_of_blocks: blocks } - } -} - sp_api::decl_runtime_apis! { /// Runtime api to collect information about a collation. /// @@ -540,117 +499,18 @@ sp_api::decl_runtime_apis! { fn relay_parent_offset() -> u32; } - /// API for parachain slot scheduling. + /// API for parachain target block rate. /// - /// This runtime API allows the parachain runtime to communicate the block interval - /// to the node side. The node will call this API every relay chain slot (~6 seconds) - /// to get the scheduled parachain block interval. - pub trait SlotSchedule { - /// Get the block production schedule for the next relay chain slot. - /// - /// - `num_cores`: The number of cores assigned to this parachain + /// This runtime API allows the parachain runtime to communicate the target block rate + /// to the node side. The target block rate is always valid for the next relay chain slot. + /// + /// The runtime can not enforce this target block rate. 
It only acts as a maximum, but not more. + /// In the end it depends on the collator how many blocks will be produced. If there are no cores + /// available or the collator is offline, no blocks at all will be produced. + pub trait TargetBlockRate { + /// Get the target block rate for this parachain. /// - /// Returns a [`NextSlotSchedule`]. - fn next_slot_schedule(num_cores: u32) -> NextSlotSchedule; - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn one_block_using_one_core_works() { - let schedule = NextSlotSchedule::one_block_using_one_core(); - assert_eq!(schedule.number_of_blocks, 1); - assert_eq!(schedule.block_time, Duration::from_secs(REF_TIME_PER_CORE_IN_SECS)); - } - - #[test] - fn x_blocks_using_y_cores_basic_functionality() { - // 2 blocks using 1 core: each block gets 1 second - let schedule = NextSlotSchedule::x_blocks_using_y_cores(2, 1); - assert_eq!(schedule.number_of_blocks, 2); - assert_eq!(schedule.block_time, Duration::from_secs(1)); - - // 4 blocks using 2 cores: each block gets 1 second - let schedule = NextSlotSchedule::x_blocks_using_y_cores(4, 2); - assert_eq!(schedule.number_of_blocks, 4); - assert_eq!(schedule.block_time, Duration::from_secs(1)); - - // 2 blocks using 2 cores: each block gets 2 seconds (max) - let schedule = NextSlotSchedule::x_blocks_using_y_cores(2, 2); - assert_eq!(schedule.number_of_blocks, 2); - assert_eq!(schedule.block_time, Duration::from_secs(2)); - } - - #[test] - fn x_blocks_using_y_cores_caps_block_time_at_ref_time() { - let schedule = NextSlotSchedule::x_blocks_using_y_cores(2, 10); - assert_eq!(schedule.number_of_blocks, 2); - assert_eq!(schedule.block_time, Duration::from_secs(REF_TIME_PER_CORE_IN_SECS)); - - let schedule = NextSlotSchedule::x_blocks_using_y_cores(1, 5); - assert_eq!(schedule.number_of_blocks, 1); - assert_eq!(schedule.block_time, Duration::from_secs(REF_TIME_PER_CORE_IN_SECS)); - } - - #[test] - fn x_blocks_using_y_cores_edge_cases() { - // Zero blocks - let schedule = 
NextSlotSchedule::x_blocks_using_y_cores(0, 1); - assert_eq!(schedule.number_of_blocks, 0); - assert_eq!(schedule.block_time, Duration::from_secs(0)); - - // Zero cores (should not panic, though not realistic) - let schedule = NextSlotSchedule::x_blocks_using_y_cores(2, 0); - assert_eq!(schedule.number_of_blocks, 0); - assert_eq!(schedule.block_time, Duration::from_secs(0)); - - // Large numbers - let schedule = NextSlotSchedule::x_blocks_using_y_cores(100, 50); - assert_eq!(schedule.number_of_blocks, 100); - assert_eq!(schedule.block_time, Duration::from_millis(60)); - } - - #[test] - fn x_blocks_using_y_cores_various_ratios() { - // 6 blocks, 3 cores: each block gets 1 second - let schedule = NextSlotSchedule::x_blocks_using_y_cores(6, 3); - assert_eq!(schedule.number_of_blocks, 6); - assert_eq!(schedule.block_time, Duration::from_secs(1)); - - // 8 blocks, 4 cores: each block gets 1 second - let schedule = NextSlotSchedule::x_blocks_using_y_cores(8, 4); - assert_eq!(schedule.number_of_blocks, 8); - assert_eq!(schedule.block_time, Duration::from_millis(750)); - - // 4 blocks, 8 cores: each block gets 2 seconds (capped) - let schedule = NextSlotSchedule::x_blocks_using_y_cores(4, 8); - assert_eq!(schedule.number_of_blocks, 4); - assert_eq!(schedule.block_time, Duration::from_millis(1500)); - - // 10 blocks, 2 cores: each block gets `400ms` - let schedule = NextSlotSchedule::x_blocks_using_y_cores(10, 2); - assert_eq!(schedule.number_of_blocks, 10); - assert_eq!(schedule.block_time, Duration::from_millis(400)); - } - - #[test] - fn x_blocks_using_y_cores_fractional_seconds() { - // 6 blocks, 1 core: each block gets `333.333... 
ms (2000ms / 6)` - let schedule = NextSlotSchedule::x_blocks_using_y_cores(6, 1); - assert_eq!(schedule.number_of_blocks, 6); - assert_eq!(schedule.block_time, Duration::from_nanos(333_333_333)); - - // 8 blocks, 1 core: each block gets `250ms` - let schedule = NextSlotSchedule::x_blocks_using_y_cores(8, 1); - assert_eq!(schedule.number_of_blocks, 8); - assert_eq!(schedule.block_time, Duration::from_millis(250)); - - // 12 blocks, 1 core: each block gets `~166.666ms` - let schedule = NextSlotSchedule::x_blocks_using_y_cores(12, 1); - assert_eq!(schedule.number_of_blocks, 12); - assert_eq!(schedule.block_time, Duration::from_nanos(166_666_666)); + /// Returns the target number of blocks per relay chain slot. + fn target_block_rate() -> u32; } } diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index feb5d2cb8199b..75859680b5199 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -631,9 +631,9 @@ impl_runtime_apis! { } - impl cumulus_primitives_core::SlotSchedule for Runtime { - fn next_slot_schedule(_: u32) -> cumulus_primitives_core::NextSlotSchedule { - cumulus_primitives_core::NextSlotSchedule::one_block_using_one_core() + impl cumulus_primitives_core::TargetBlockRate for Runtime { + fn target_block_rate() -> u32 { + 1 } } } diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index 924ab7a79eb07..a1ed21936580e 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -60,7 +60,7 @@ pub use disputes::{ /// relatively rare. /// /// The associated worker binaries should use the same version as the node that spawns them. 
-pub const NODE_VERSION: &'static str = "1.20.1"; +pub const NODE_VERSION: &'static str = "1.20.2"; // For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node // plus some overhead: diff --git a/prdoc/pr_10162.prdoc b/prdoc/pr_10162.prdoc new file mode 100644 index 0000000000000..9d5fa0e7e8ebe --- /dev/null +++ b/prdoc/pr_10162.prdoc @@ -0,0 +1,16 @@ +title: Make tasks local only. +doc: +- audience: + - Runtime Dev + - Node Dev + description: |- + In frame-system: tasks are now only valid from local source, external source is invalid. + + The reason is that transaction are identified by their provided tag in the transaction pool, tasks provided tag is simply the hash of the tasks. Therefore, depending on how tasks are written, it may be possible to have a very large number of tasks that actually do the same operation but where the hash is different, thus it can be used to spam the transaction pool. A simple solution against this is to accept tasks only from local source. + + Fix: https://github.com/paritytech/polkadot-sdk/issues/9693 +crates: +- name: frame-support-procedural + bump: major +- name: frame-system + bump: major diff --git a/prdoc/pr_10298.prdoc b/prdoc/pr_10298.prdoc new file mode 100644 index 0000000000000..0f34dfbcb474e --- /dev/null +++ b/prdoc/pr_10298.prdoc @@ -0,0 +1,10 @@ +title: Fix the `CodeNotFound` issue in PolkaVM tests +doc: +- audience: Runtime Dev + description: |- + # Description + + This PR bumps the commit hash of the revive-differential-tests framework to a version that contains a fix for the `CodeNotFound` issue we've been seeing with PolkaVM. The framework now uploads the code of all the contracts prior to running the tests. + + When CI runs for this PR we should observe that there's either no more `CodeNotFound` errors in PolkaVM tests or that it's greatly reduced. 
+crates: [] diff --git a/prdoc/pr_10305.prdoc b/prdoc/pr_10305.prdoc new file mode 100644 index 0000000000000..05a73937d871f --- /dev/null +++ b/prdoc/pr_10305.prdoc @@ -0,0 +1,10 @@ +title: 'Cumulus: fix pre-connect to backers for single collator parachains' +doc: +- audience: Node Dev + description: |- + When running a single collator (most commonly on testnets), the block builder task is always + able to claim a slot, so we're never triggering the pre-connect mechanism which happens for + slots owned by other authors. +crates: +- name: cumulus-client-consensus-aura + bump: patch diff --git a/prdoc/pr_10329.prdoc b/prdoc/pr_10329.prdoc new file mode 100644 index 0000000000000..f1c4d70894279 --- /dev/null +++ b/prdoc/pr_10329.prdoc @@ -0,0 +1,10 @@ +title: 'fix: support `paginationStartKey` parameter for `archive_v1_storage`' +doc: +- audience: Node Dev + description: |- + Fixes #10185 + + This PR is to add support for `paginationStartKey` parameter in `archive_v1_storage` JSON RPC API for query type: `descendantsValues` and `descendantsHashes` per [the latest specs](https://paritytech.github.io/json-rpc-interface-spec/api/archive_v1_storage.html). +crates: +- name: sc-rpc-spec-v2 + bump: major diff --git a/prdoc/pr_10333.prdoc b/prdoc/pr_10333.prdoc new file mode 100644 index 0000000000000..ada51f47ac8f7 --- /dev/null +++ b/prdoc/pr_10333.prdoc @@ -0,0 +1,11 @@ +title: 'parachain-consensus: Do not pin blocks on the relay chain during syncing' +doc: +- audience: Node Operator + description: |- + The parachain-consensus component in parachain nodes was hogging finality notifications. This is now fixed. + The finality notifications where keeping blocks pinned in memory, causing unnecessary memory usage. 
+crates: +- name: cumulus-client-consensus-common + bump: major +- name: cumulus-client-service + bump: patch diff --git a/prdoc/pr_10336.prdoc b/prdoc/pr_10336.prdoc new file mode 100644 index 0000000000000..b44faef9a6246 --- /dev/null +++ b/prdoc/pr_10336.prdoc @@ -0,0 +1,7 @@ +title: fix P256Verify precompile address +doc: +- audience: Runtime Dev + description: fix https://github.com/paritytech/contract-issues/issues/220 +crates: +- name: pallet-revive + bump: patch diff --git a/prdoc/pr_10337.prdoc b/prdoc/pr_10337.prdoc new file mode 100644 index 0000000000000..d313cdf5ea101 --- /dev/null +++ b/prdoc/pr_10337.prdoc @@ -0,0 +1,8 @@ +title: Allow DT CI to be manually triggered +doc: +- audience: Runtime Dev + description: |- + # Description + + This is a small PR that allows for the differential testing job to be manually triggered instead of _only_ being triggered by PRs. +crates: [] diff --git a/prdoc/pr_10347.prdoc b/prdoc/pr_10347.prdoc new file mode 100644 index 0000000000000..cf00b63cce284 --- /dev/null +++ b/prdoc/pr_10347.prdoc @@ -0,0 +1,8 @@ +title: Don't require PR for uploading comment for DT CI +doc: +- audience: Runtime Dev + description: |- + # Description + + Small PR that changes the DT CI to not require a PR for uploading the report to the CI job. +crates: [] diff --git a/prdoc/pr_9930.prdoc b/prdoc/pr_9930.prdoc new file mode 100644 index 0000000000000..7bde8264634e3 --- /dev/null +++ b/prdoc/pr_9930.prdoc @@ -0,0 +1,19 @@ +title: Introduce `ReplayProofSizeProvider`, `RecordingProofProvider` & transactional + extensions +doc: +- audience: Node Dev + description: |- + The `ProofSizeExt` extension is used to serve the proof size to the runtime. It uses the proof recorder to request the current proof size. The `RecordingProofProvider` extension can record the calls to the proof size function. Later the `ReplayProofSizeProvider` can be used to replay these recorded proof sizes. So, the proof recorder is not required anymore. 
+ + Extensions are now also hooked into the transactional system. This means they are called when a new transaction is created and informed when a transaction is committed or reverted. +crates: +- name: sp-api-proc-macro + bump: major +- name: sp-api + bump: major +- name: sp-externalities + bump: major +- name: sp-state-machine + bump: major +- name: sp-trie + bump: major diff --git a/prdoc/pr_9990.prdoc b/prdoc/pr_9990.prdoc new file mode 100644 index 0000000000000..fa7f237466b66 --- /dev/null +++ b/prdoc/pr_9990.prdoc @@ -0,0 +1,16 @@ +title: '`cumulus`: Skip building on blocks on relay parents in old session' +doc: +- audience: Node Dev + description: |- + Collators building on older relay parents must skip building blocks when the chosen RP session is different than best block session. + + The relay chain will not accept such candidate. We can see this happening on the Kusama Canary parachain at each session boundary. + + Slot-based Aura has been modified in Cumulus to skip building blocks on relay parents in old session. 
+crates: +- name: cumulus-client-consensus-aura + bump: patch +- name: cumulus-client-parachain-inherent + bump: patch +- name: sc-consensus-babe + bump: minor \ No newline at end of file diff --git a/prdoc/pr_10073.prdoc b/prdoc/stable2509-2/pr_10073.prdoc similarity index 100% rename from prdoc/pr_10073.prdoc rename to prdoc/stable2509-2/pr_10073.prdoc diff --git a/prdoc/pr_10149.prdoc b/prdoc/stable2509-2/pr_10149.prdoc similarity index 100% rename from prdoc/pr_10149.prdoc rename to prdoc/stable2509-2/pr_10149.prdoc diff --git a/prdoc/pr_10163.prdoc b/prdoc/stable2509-2/pr_10163.prdoc similarity index 100% rename from prdoc/pr_10163.prdoc rename to prdoc/stable2509-2/pr_10163.prdoc diff --git a/prdoc/pr_10180.prdoc b/prdoc/stable2509-2/pr_10180.prdoc similarity index 100% rename from prdoc/pr_10180.prdoc rename to prdoc/stable2509-2/pr_10180.prdoc diff --git a/prdoc/stable2509-2/pr_10206.prdoc b/prdoc/stable2509-2/pr_10206.prdoc new file mode 100644 index 0000000000000..d98c375e624bb --- /dev/null +++ b/prdoc/stable2509-2/pr_10206.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add rpm build for polkadot node + +doc: + - audience: Node Operator + description: | + This PR adds an rpm build for the polkadot node. 
The rpm package was tested on Rocky Linux 10 and Alma Linux 10 distributions + +crates: + - name: polkadot + bump: none diff --git a/prdoc/pr_10235.prdoc b/prdoc/stable2509-2/pr_10235.prdoc similarity index 100% rename from prdoc/pr_10235.prdoc rename to prdoc/stable2509-2/pr_10235.prdoc diff --git a/prdoc/pr_10248.prdoc b/prdoc/stable2509-2/pr_10248.prdoc similarity index 100% rename from prdoc/pr_10248.prdoc rename to prdoc/stable2509-2/pr_10248.prdoc diff --git a/prdoc/pr_10280.prdoc b/prdoc/stable2509-2/pr_10280.prdoc similarity index 100% rename from prdoc/pr_10280.prdoc rename to prdoc/stable2509-2/pr_10280.prdoc diff --git a/prdoc/stable2509-2/pr_9581.prdoc b/prdoc/stable2509-2/pr_9581.prdoc new file mode 100644 index 0000000000000..4bdf025a66c39 --- /dev/null +++ b/prdoc/stable2509-2/pr_9581.prdoc @@ -0,0 +1,8 @@ +title: 'fix: parachain informant' +doc: +- audience: Node Operator + description: |- + The parachain informant was logging information for all parachains, not just ours. This PR fixes that by filtering the events by parachain ID. 
+crates: +- name: cumulus-client-service + bump: patch diff --git a/prdoc/pr_9725.prdoc b/prdoc/stable2509-2/pr_9725.prdoc similarity index 100% rename from prdoc/pr_9725.prdoc rename to prdoc/stable2509-2/pr_9725.prdoc diff --git a/prdoc/pr_9871.prdoc b/prdoc/stable2509-2/pr_9871.prdoc similarity index 100% rename from prdoc/pr_9871.prdoc rename to prdoc/stable2509-2/pr_9871.prdoc diff --git a/prdoc/pr_9965.prdoc b/prdoc/stable2509-2/pr_9965.prdoc similarity index 100% rename from prdoc/pr_9965.prdoc rename to prdoc/stable2509-2/pr_9965.prdoc diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index a7cad251379bf..a21aa92871b8c 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -913,6 +913,11 @@ pub fn find_pre_digest(header: &B::Header) -> Result(header: &B::Header) -> bool { + find_next_epoch_digest::(header).ok().flatten().is_some() +} + /// Extract the BABE epoch change digest from the given header, if it exists. 
pub fn find_next_epoch_digest( header: &B::Header, diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index 0f7ceed008762..2814e12afdcb2 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -231,13 +231,29 @@ where .into_iter() .map(|query| { let key = StorageKey(parse_hex_param(query.key)?); - Ok(StorageQuery { key, query_type: query.query_type }) + + // Validate that paginationStartKey is only used with descendant queries + if query.pagination_start_key.is_some() && + !query.query_type.is_descendant_query() + { + return Err(ArchiveError::InvalidParam( + "paginationStartKey is only valid for descendantsValues and descendantsHashes query types" + .to_string(), + )); + } + + let pagination_start_key = query + .pagination_start_key + .map(|key| parse_hex_param(key).map(StorageKey)) + .transpose()?; + + Ok(StorageQuery { key, query_type: query.query_type, pagination_start_key }) }) .collect::, ArchiveError>>() { Ok(items) => items, Err(error) => { - let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())).await; return }, }; @@ -246,7 +262,7 @@ where let child_trie = match child_trie { Ok(child_trie) => child_trie.map(ChildInfo::new_default_from_vec), Err(error) => { - let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())); + let _ = sink.send(&ArchiveStorageEvent::err(error.to_string())).await; return }, }; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index 5956af3e019af..baf173511b818 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -360,10 +360,26 @@ async fn archive_storage_hashes_values() { let key = hex_string(&KEY); let items: Vec> = vec![ - StorageQuery { key: key.clone(), query_type: 
StorageQueryType::DescendantsHashes }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: None, + }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, + }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Hash, + pagination_start_key: None, + }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Value, + pagination_start_key: None, + }, ]; let mut sub = api @@ -450,8 +466,16 @@ async fn archive_storage_hashes_values_child_trie() { let expected_value = hex_string(&CHILD_VALUE); let items: Vec> = vec![ - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: None, + }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, + }, ]; let mut sub = api .subscribe_unbounded("archive_v1_storage", rpc_params![&genesis_hash, items, &child_info]) @@ -505,38 +529,46 @@ async fn archive_storage_closest_merkle_value() { StorageQuery { key: hex_string(b":AAAA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, StorageQuery { key: hex_string(b":AAAB"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, // Key with descendant. 
StorageQuery { key: hex_string(b":A"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, StorageQuery { key: hex_string(b":AA"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, // Keys below this comment do not produce a result. // Key that exceed the keyspace of the trie. StorageQuery { key: hex_string(b":AAAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, StorageQuery { key: hex_string(b":AAABX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, // Key that are not part of the trie. StorageQuery { key: hex_string(b":AAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, StorageQuery { key: hex_string(b":AAAX"), query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, ] ], @@ -666,6 +698,7 @@ async fn archive_storage_iterations() { vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None }] ], ) @@ -686,6 +719,7 @@ async fn archive_storage_iterations() { vec![StorageQuery { key: hex_string(b":m"), query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None }] ], ) @@ -743,6 +777,342 @@ async fn archive_storage_iterations() { ); } +#[tokio::test] +async fn archive_storage_pagination_descendant_values() { + let (client, api) = setup_api(); + + // Import a new block with multiple storage entries. 
+ let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder + .push_storage_change(b":prefix:aa".to_vec(), Some(b"value_a".to_vec())) + .unwrap(); + builder + .push_storage_change(b":prefix:bb".to_vec(), Some(b"value_b".to_vec())) + .unwrap(); + builder + .push_storage_change(b":prefix:cc".to_vec(), Some(b"value_c".to_vec())) + .unwrap(); + builder + .push_storage_change(b":prefix:dd".to_vec(), Some(b"value_d".to_vec())) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + // First request without pagination - should get all results. + let mut sub = api + .subscribe_unbounded( + "archive_v1_storage", + rpc_params![ + &block_hash, + vec![StorageQuery { + key: hex_string(b":prefix:"), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, + }] + ], + ) + .await + .unwrap(); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":prefix:aa"), + result: StorageResultType::Value(hex_string(b"value_a")), + child_trie_key: None, + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":prefix:bb"), + result: StorageResultType::Value(hex_string(b"value_b")), + child_trie_key: None, + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":prefix:cc"), + result: StorageResultType::Value(hex_string(b"value_c")), + child_trie_key: None, + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":prefix:dd"), + result: StorageResultType::Value(hex_string(b"value_d")), + child_trie_key: None, + }) + ); + + assert_matches!( + 
get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); + + // Second request with pagination starting from `:prefix:bb` - should skip aa and bb. + let mut sub = api + .subscribe_unbounded( + "archive_v1_storage", + rpc_params![ + &block_hash, + vec![StorageQuery { + key: hex_string(b":prefix:"), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: Some(hex_string(b":prefix:bb")), + }] + ], + ) + .await + .unwrap(); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":prefix:cc"), + result: StorageResultType::Value(hex_string(b"value_c")), + child_trie_key: None, + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":prefix:dd"), + result: StorageResultType::Value(hex_string(b"value_d")), + child_trie_key: None, + }) + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_pagination_descendant_hashes() { + let (client, api) = setup_api(); + + // Import a new block with multiple storage entries. 
+ let mut builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap(); + builder + .push_storage_change(b":test:1".to_vec(), Some(b"val1".to_vec())) + .unwrap(); + builder + .push_storage_change(b":test:2".to_vec(), Some(b"val2".to_vec())) + .unwrap(); + builder + .push_storage_change(b":test:3".to_vec(), Some(b"val3".to_vec())) + .unwrap(); + let block = builder.build().unwrap().block; + let block_hash = format!("{:?}", block.header.hash()); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + + let expected_hash_1 = format!("{:?}", Blake2Hasher::hash(b"val2")); + let expected_hash_2 = format!("{:?}", Blake2Hasher::hash(b"val3")); + + // Request with pagination starting from `:test:1` - should skip keys 1 and get 2 and 3. + let mut sub = api + .subscribe_unbounded( + "archive_v1_storage", + rpc_params![ + &block_hash, + vec![StorageQuery { + key: hex_string(b":test:"), + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: Some(hex_string(b":test:1")), + }] + ], + ) + .await + .unwrap(); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":test:2"), + result: StorageResultType::Hash(expected_hash_1), + child_trie_key: None, + }) + ); + + assert_eq!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::Storage(StorageResult { + key: hex_string(b":test:3"), + result: StorageResultType::Hash(expected_hash_2), + child_trie_key: None, + }) + ); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + +#[tokio::test] +async fn archive_storage_pagination_invalid_query_type() { + let (client, api) = setup_api(); + let block_hash = format!("{:?}", client.genesis_hash()); + + // Test that paginationStartKey with Value query type returns an error. 
+ let mut sub = api + .subscribe_unbounded( + "archive_v1_storage", + rpc_params![ + &block_hash, + vec![StorageQuery { + key: hex_string(b":test"), + query_type: StorageQueryType::Value, + pagination_start_key: Some(hex_string(b":test:a")), + }] + ], + ) + .await + .unwrap(); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(err) if err.error.contains( + "paginationStartKey is only valid for descendantsValues and descendantsHashes query types" + ) + ); + + // Test that paginationStartKey with Hash query type returns an error. + let mut sub = api + .subscribe_unbounded( + "archive_v1_storage", + rpc_params![ + &block_hash, + vec![StorageQuery { + key: hex_string(b":test"), + query_type: StorageQueryType::Hash, + pagination_start_key: Some(hex_string(b":test:a")), + }] + ], + ) + .await + .unwrap(); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(err) if err.error.contains( + "paginationStartKey is only valid for descendantsValues and descendantsHashes query types" + ) + ); + + // Test that paginationStartKey with ClosestDescendantMerkleValue query type returns an error. + let mut sub = api + .subscribe_unbounded( + "archive_v1_storage", + rpc_params![ + &block_hash, + vec![StorageQuery { + key: hex_string(b":test"), + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: Some(hex_string(b":test:a")), + }] + ], + ) + .await + .unwrap(); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(err) if err.error.contains( + "paginationStartKey is only valid for descendantsValues and descendantsHashes query types" + ) + ); +} + +#[tokio::test] +async fn archive_storage_pagination_invalid_hex() { + let (client, api) = setup_api(); + let block_hash = format!("{:?}", client.genesis_hash()); + + // Test that invalid hex in pagination_start_key returns an error. 
+ let mut sub = api + .subscribe_unbounded( + "archive_v1_storage", + rpc_params![ + &block_hash, + vec![StorageQuery { + key: hex_string(b":test:"), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: Some("0xINVALID_HEX".to_string()), + }] + ], + ) + .await + .unwrap(); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(err) if err.error.contains("Invalid parameter") + && err.error.contains("0xINVALID_HEX") + ); + + // Test that invalid hex in key also returns an error. + let mut sub = api + .subscribe_unbounded( + "archive_v1_storage", + rpc_params![ + &block_hash, + vec![StorageQuery { + key: "NOT_HEX".to_string(), + query_type: StorageQueryType::Value, + pagination_start_key: None, + }] + ], + ) + .await + .unwrap(); + + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageError(err) if err.error.contains("Invalid parameter") + && err.error.contains("NOT_HEX") + ); + + // Test that pagination_start_key with empty string is valid (gets converted to empty bytes). 
+ let mut sub = api + .subscribe_unbounded( + "archive_v1_storage", + rpc_params![ + &block_hash, + vec![StorageQuery { + key: hex_string(b":test:"), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: Some("".to_string()), + }] + ], + ) + .await + .unwrap(); + + // Should complete successfully, not error + assert_matches!( + get_next_event::(&mut sub).await, + ArchiveStorageEvent::StorageDone + ); +} + #[tokio::test] async fn archive_storage_diff_main_trie() { let (client, api) = setup_api(); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index b949fb25402bf..039bcf62562e3 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -400,7 +400,7 @@ where .into_iter() .map(|query| { let key = StorageKey(parse_hex_param(query.key)?); - Ok(StorageQuery { key, query_type: query.query_type }) + Ok(StorageQuery { key, query_type: query.query_type, pagination_start_key: None }) }) .collect::, ChainHeadRpcError>>() { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 6edd46c8b4644..4e6cd37bcdf98 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -699,7 +699,11 @@ async fn get_storage_hash() { rpc_params![ "invalid_sub_id", &invalid_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }] + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Hash, + pagination_start_key: None + }] ], ) .await @@ -713,7 +717,11 @@ async fn get_storage_hash() { rpc_params![ &sub_id, &invalid_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }] + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Hash, + pagination_start_key: None + }] ], ) .await @@ -729,7 
+737,11 @@ async fn get_storage_hash() { rpc_params![ &sub_id, &block_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }] + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Hash, + pagination_start_key: None + }] ], ) .await @@ -772,7 +784,11 @@ async fn get_storage_hash() { rpc_params![ &sub_id, &block_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }] + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Hash, + pagination_start_key: None + }] ], ) .await @@ -805,7 +821,11 @@ async fn get_storage_hash() { rpc_params![ &sub_id, &genesis_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Hash, + pagination_start_key: None + }], &child_info ], ) @@ -865,11 +885,13 @@ async fn get_storage_multi_query_iter() { vec![ StorageQuery { key: key.clone(), - query_type: StorageQueryType::DescendantsHashes + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: None }, StorageQuery { key: key.clone(), - query_type: StorageQueryType::DescendantsValues + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None } ] ], @@ -916,11 +938,13 @@ async fn get_storage_multi_query_iter() { vec![ StorageQuery { key: key.clone(), - query_type: StorageQueryType::DescendantsHashes + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: None }, StorageQuery { key: key.clone(), - query_type: StorageQueryType::DescendantsValues + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None } ], &child_info @@ -967,7 +991,11 @@ async fn get_storage_value() { rpc_params![ "invalid_sub_id", &invalid_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Value, + pagination_start_key: None + }] ], ) .await @@ 
-981,7 +1009,11 @@ async fn get_storage_value() { rpc_params![ &sub_id, &invalid_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Value, + pagination_start_key: None + }] ], ) .await @@ -997,7 +1029,11 @@ async fn get_storage_value() { rpc_params![ &sub_id, &block_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Value, + pagination_start_key: None + }] ], ) .await @@ -1040,7 +1076,11 @@ async fn get_storage_value() { rpc_params![ &sub_id, &block_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Value, + pagination_start_key: None + }] ], ) .await @@ -1072,7 +1112,11 @@ async fn get_storage_value() { rpc_params![ &sub_id, &genesis_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }], + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Value, + pagination_start_key: None + }], &child_info ], ) @@ -1114,7 +1158,11 @@ async fn get_storage_non_queryable_key() { rpc_params![ &sub_id, &block_hash, - vec![StorageQuery { key: prefixed_key, query_type: StorageQueryType::Value }] + vec![StorageQuery { + key: prefixed_key, + query_type: StorageQueryType::Value, + pagination_start_key: None + }] ], ) .await @@ -1139,7 +1187,11 @@ async fn get_storage_non_queryable_key() { rpc_params![ &sub_id, &block_hash, - vec![StorageQuery { key: prefixed_key, query_type: StorageQueryType::Value }] + vec![StorageQuery { + key: prefixed_key, + query_type: StorageQueryType::Value, + pagination_start_key: None + }] ], ) .await @@ -1164,7 +1216,11 @@ async fn get_storage_non_queryable_key() { rpc_params![ &sub_id, &block_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }], + 
vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Value, + pagination_start_key: None + }], &prefixed_key ], ) @@ -1190,7 +1246,11 @@ async fn get_storage_non_queryable_key() { rpc_params![ &sub_id, &block_hash, - vec![StorageQuery { key, query_type: StorageQueryType::Value }], + vec![StorageQuery { + key, + query_type: StorageQueryType::Value, + pagination_start_key: None + }], &prefixed_key ], ) @@ -1238,7 +1298,11 @@ async fn unique_operation_ids() { rpc_params![ &sub_id, &block_hash, - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Value }] + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Value, + pagination_start_key: None + }] ], ) .await @@ -2864,10 +2928,26 @@ async fn ensure_operation_limits_works() { let key = hex_string(&KEY); let items = vec![ - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsHashes }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, - StorageQuery { key: key.clone(), query_type: StorageQueryType::DescendantsValues }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: None, + }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: None, + }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, + }, + StorageQuery { + key: key.clone(), + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, + }, ]; let response: MethodResponse = api @@ -2991,7 +3071,8 @@ async fn storage_is_backpressured() { &block_hash, vec![StorageQuery { key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None }] ], ) @@ -3126,7 +3207,8 @@ async fn 
stop_storage_operation() { &block_hash, vec![StorageQuery { key: hex_string(b":m"), - query_type: StorageQueryType::DescendantsValues + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None }] ], ) @@ -3194,39 +3276,47 @@ async fn storage_closest_merkle_value() { vec![ StorageQuery { key: hex_string(b":AAAA"), - query_type: StorageQueryType::ClosestDescendantMerkleValue + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, StorageQuery { key: hex_string(b":AAAB"), - query_type: StorageQueryType::ClosestDescendantMerkleValue + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, // Key with descendant. StorageQuery { key: hex_string(b":A"), - query_type: StorageQueryType::ClosestDescendantMerkleValue + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, StorageQuery { key: hex_string(b":AA"), - query_type: StorageQueryType::ClosestDescendantMerkleValue + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, // Keys below this comment do not produce a result. // Key that exceed the keyspace of the trie. StorageQuery { key: hex_string(b":AAAAX"), - query_type: StorageQueryType::ClosestDescendantMerkleValue + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, StorageQuery { key: hex_string(b":AAABX"), - query_type: StorageQueryType::ClosestDescendantMerkleValue + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, // Key that are not part of the trie. 
StorageQuery { key: hex_string(b":AAX"), - query_type: StorageQueryType::ClosestDescendantMerkleValue + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, StorageQuery { key: hex_string(b":AAAX"), - query_type: StorageQueryType::ClosestDescendantMerkleValue + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None }, ] ], @@ -3525,7 +3615,11 @@ async fn chain_head_single_connection_context() { &client, first_sub_id.clone(), finalized_hash.clone(), - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Hash, + pagination_start_key: None, + }], None, ) .await @@ -3536,7 +3630,11 @@ async fn chain_head_single_connection_context() { &second_client, first_sub_id.clone(), finalized_hash.clone(), - vec![StorageQuery { key: key.clone(), query_type: StorageQueryType::Hash }], + vec![StorageQuery { + key: key.clone(), + query_type: StorageQueryType::Hash, + pagination_start_key: None, + }], None, ) .await diff --git a/substrate/client/rpc-spec-v2/src/common/events.rs b/substrate/client/rpc-spec-v2/src/common/events.rs index 91530115f9218..b4c31fe375eb1 100644 --- a/substrate/client/rpc-spec-v2/src/common/events.rs +++ b/substrate/client/rpc-spec-v2/src/common/events.rs @@ -29,6 +29,11 @@ pub struct StorageQuery { /// The type of the storage query. #[serde(rename = "type")] pub query_type: StorageQueryType, + /// The optional pagination start key for descendants queries. + /// Only valid for `DescendantsValues` and `DescendantsHashes` query types. + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub pagination_start_key: Option, } /// The type of the storage query. @@ -377,7 +382,11 @@ mod tests { #[test] fn storage_query() { // Item with Value. 
- let item = StorageQuery { key: "0x1", query_type: StorageQueryType::Value }; + let item = StorageQuery { + key: "0x1", + query_type: StorageQueryType::Value, + pagination_start_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","type":"value"}"#; @@ -387,7 +396,11 @@ mod tests { assert_eq!(dec, item); // Item with Hash. - let item = StorageQuery { key: "0x1", query_type: StorageQueryType::Hash }; + let item = StorageQuery { + key: "0x1", + query_type: StorageQueryType::Hash, + pagination_start_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","type":"hash"}"#; @@ -397,7 +410,11 @@ mod tests { assert_eq!(dec, item); // Item with DescendantsValues. - let item = StorageQuery { key: "0x1", query_type: StorageQueryType::DescendantsValues }; + let item = StorageQuery { + key: "0x1", + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","type":"descendantsValues"}"#; @@ -407,7 +424,11 @@ mod tests { assert_eq!(dec, item); // Item with DescendantsHashes. - let item = StorageQuery { key: "0x1", query_type: StorageQueryType::DescendantsHashes }; + let item = StorageQuery { + key: "0x1", + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","type":"descendantsHashes"}"#; @@ -417,8 +438,11 @@ mod tests { assert_eq!(dec, item); // Item with Merkle. 
- let item = - StorageQuery { key: "0x1", query_type: StorageQueryType::ClosestDescendantMerkleValue }; + let item = StorageQuery { + key: "0x1", + query_type: StorageQueryType::ClosestDescendantMerkleValue, + pagination_start_key: None, + }; // Encode let ser = serde_json::to_string(&item).unwrap(); let exp = r#"{"key":"0x1","type":"closestDescendantMerkleValue"}"#; @@ -426,5 +450,33 @@ mod tests { // Decode let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap(); assert_eq!(dec, item); + + // Item with DescendantsValues and paginationStartKey. + let item = StorageQuery { + key: "0x1", + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","type":"descendantsValues","paginationStartKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); + + // Item with DescendantsHashes and paginationStartKey. 
+ let item = StorageQuery { + key: "0x1", + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: Some("0x2"), + }; + // Encode + let ser = serde_json::to_string(&item).unwrap(); + let exp = r#"{"key":"0x1","type":"descendantsHashes","paginationStartKey":"0x2"}"#; + assert_eq!(ser, exp); + // Decode + let dec: StorageQuery<&str> = serde_json::from_str(exp).unwrap(); + assert_eq!(dec, item); } } diff --git a/substrate/client/rpc-spec-v2/src/common/storage.rs b/substrate/client/rpc-spec-v2/src/common/storage.rs index a1e34d51530ec..41f4960880651 100644 --- a/substrate/client/rpc-spec-v2/src/common/storage.rs +++ b/substrate/client/rpc-spec-v2/src/common/storage.rs @@ -277,7 +277,7 @@ where let query = QueryIter { query_key: item.key, ty: IterQueryType::Value, - pagination_start_key: None, + pagination_start_key: item.pagination_start_key, }; this.client.query_iter_pagination_with_producer( query, @@ -290,7 +290,7 @@ where let query = QueryIter { query_key: item.key, ty: IterQueryType::Hash, - pagination_start_key: None, + pagination_start_key: item.pagination_start_key, }; this.client.query_iter_pagination_with_producer( query, diff --git a/substrate/frame/revive/src/benchmarking.rs b/substrate/frame/revive/src/benchmarking.rs index e0fce918c0db4..610805e745e99 100644 --- a/substrate/frame/revive/src/benchmarking.rs +++ b/substrate/frame/revive/src/benchmarking.rs @@ -2288,7 +2288,7 @@ mod benchmarks { { result = run_builtin_precompile( &mut ext, - H160::from_low_u64_be(100).as_fixed_bytes(), + H160::from_low_u64_be(0x100).as_fixed_bytes(), input, ); } diff --git a/substrate/frame/revive/src/precompiles/builtin/blake2f.rs b/substrate/frame/revive/src/precompiles/builtin/blake2f.rs index bad0fa27f6134..f1d7b8fb68551 100644 --- a/substrate/frame/revive/src/precompiles/builtin/blake2f.rs +++ b/substrate/frame/revive/src/precompiles/builtin/blake2f.rs @@ -28,7 +28,7 @@ pub struct Blake2F(PhantomData); impl PrimitivePrecompile for Blake2F { type T = T; 
- const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(9).unwrap()); + const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(0x9).unwrap()); const HAS_CONTRACT_INFO: bool = false; fn call( diff --git a/substrate/frame/revive/src/precompiles/builtin/bn128.rs b/substrate/frame/revive/src/precompiles/builtin/bn128.rs index 1aad0fbedc40f..d0b1e95829c3c 100644 --- a/substrate/frame/revive/src/precompiles/builtin/bn128.rs +++ b/substrate/frame/revive/src/precompiles/builtin/bn128.rs @@ -30,7 +30,7 @@ pub struct Bn128Add(PhantomData); impl PrimitivePrecompile for Bn128Add { type T = T; - const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(6).unwrap()); + const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(0x6).unwrap()); const HAS_CONTRACT_INFO: bool = false; fn call( @@ -58,7 +58,7 @@ pub struct Bn128Mul(PhantomData); impl PrimitivePrecompile for Bn128Mul { type T = T; - const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(7).unwrap()); + const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(0x7).unwrap()); const HAS_CONTRACT_INFO: bool = false; fn call( @@ -86,7 +86,7 @@ pub struct Bn128Pairing(PhantomData); impl PrimitivePrecompile for Bn128Pairing { type T = T; - const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(8).unwrap()); + const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(0x8).unwrap()); const HAS_CONTRACT_INFO: bool = false; fn call( diff --git a/substrate/frame/revive/src/precompiles/builtin/ecrecover.rs b/substrate/frame/revive/src/precompiles/builtin/ecrecover.rs index c699c5f656ded..e07a5df41efce 100644 --- a/substrate/frame/revive/src/precompiles/builtin/ecrecover.rs +++ b/substrate/frame/revive/src/precompiles/builtin/ecrecover.rs @@ -27,7 +27,7 @@ pub struct EcRecover(PhantomData); impl PrimitivePrecompile for EcRecover { type T 
= T; - const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(1).unwrap()); + const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(0x1).unwrap()); const HAS_CONTRACT_INFO: bool = false; fn call( diff --git a/substrate/frame/revive/src/precompiles/builtin/identity.rs b/substrate/frame/revive/src/precompiles/builtin/identity.rs index bd0c35d26204d..94af5eb032ba5 100644 --- a/substrate/frame/revive/src/precompiles/builtin/identity.rs +++ b/substrate/frame/revive/src/precompiles/builtin/identity.rs @@ -27,7 +27,7 @@ pub struct Identity(PhantomData); impl PrimitivePrecompile for Identity { type T = T; - const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(4).unwrap()); + const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(0x4).unwrap()); const HAS_CONTRACT_INFO: bool = false; fn call( diff --git a/substrate/frame/revive/src/precompiles/builtin/modexp.rs b/substrate/frame/revive/src/precompiles/builtin/modexp.rs index 6326980a335f3..2d671ddbbfb78 100644 --- a/substrate/frame/revive/src/precompiles/builtin/modexp.rs +++ b/substrate/frame/revive/src/precompiles/builtin/modexp.rs @@ -49,7 +49,7 @@ pub struct Modexp(PhantomData); impl PrimitivePrecompile for Modexp { type T = T; - const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(5).unwrap()); + const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(0x5).unwrap()); const HAS_CONTRACT_INFO: bool = false; fn call( diff --git a/substrate/frame/revive/src/precompiles/builtin/p256_verify.rs b/substrate/frame/revive/src/precompiles/builtin/p256_verify.rs index f3cff1d7c0d49..68d8789954ff1 100644 --- a/substrate/frame/revive/src/precompiles/builtin/p256_verify.rs +++ b/substrate/frame/revive/src/precompiles/builtin/p256_verify.rs @@ -35,7 +35,8 @@ pub struct P256Verify(PhantomData); impl PrimitivePrecompile for P256Verify { type T = T; - const MATCHER: 
BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(100).unwrap()); + const MATCHER: BuiltinAddressMatcher = + BuiltinAddressMatcher::Fixed(NonZero::new(0x100).unwrap()); const HAS_CONTRACT_INFO: bool = false; /// [RIP-7212](https://github.com/ethereum/RIPs/blob/master/RIPS/rip-7212.md#specification) secp256r1 precompile. @@ -70,4 +71,12 @@ mod tests { // https://github.com/ethereum/go-ethereum/blob/master/core/vm/testdata/precompiles/p256Verify.json run_test_vectors::>(include_str!("./testdata/256-p256_verify.json")); } + + #[test] + fn test_p256_verify_address_match() { + assert_eq!( + as PrimitivePrecompile>::MATCHER.base_address(), + hex_literal::hex!("0000000000000000000000000000000000000100") + ); + } } diff --git a/substrate/frame/revive/src/precompiles/builtin/ripemd160.rs b/substrate/frame/revive/src/precompiles/builtin/ripemd160.rs index 0e96d7a332bdc..bd8e81b0b908d 100644 --- a/substrate/frame/revive/src/precompiles/builtin/ripemd160.rs +++ b/substrate/frame/revive/src/precompiles/builtin/ripemd160.rs @@ -28,7 +28,7 @@ pub struct Ripemd160(PhantomData); impl PrimitivePrecompile for Ripemd160 { type T = T; - const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(3).unwrap()); + const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(0x3).unwrap()); const HAS_CONTRACT_INFO: bool = false; fn call( diff --git a/substrate/frame/revive/src/precompiles/builtin/sha256.rs b/substrate/frame/revive/src/precompiles/builtin/sha256.rs index 64e43bfae334e..54474db9fcf69 100644 --- a/substrate/frame/revive/src/precompiles/builtin/sha256.rs +++ b/substrate/frame/revive/src/precompiles/builtin/sha256.rs @@ -27,7 +27,7 @@ pub struct Sha256(PhantomData); impl PrimitivePrecompile for Sha256 { type T = T; - const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(2).unwrap()); + const MATCHER: BuiltinAddressMatcher = BuiltinAddressMatcher::Fixed(NonZero::new(0x2).unwrap()); const 
HAS_CONTRACT_INFO: bool = false; fn call( diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 4c24e1860fe9c..d06bd41e0a0b1 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1120,10 +1120,9 @@ pub fn composite_enum(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Allows you to define some service work that can be recognized by a script or an -/// off-chain worker. +/// Allows you to define some service work that can be recognized by the off-chain worker. /// -/// Such a script can then create and submit all such work items at any given time. +/// The off-chain worker can then create and submit all such work items at any given time. /// /// These work items are defined as instances of the `Task` trait (found at /// `frame_support::traits::Task`). [`pallet:tasks_experimental`](macro@tasks_experimental) when @@ -1140,11 +1139,11 @@ pub fn composite_enum(_: TokenStream, _: TokenStream) -> TokenStream { /// All of such Tasks are then aggregated into a `RuntimeTask` by /// [`construct_runtime`](macro@construct_runtime). /// -/// Finally, the `RuntimeTask` can then used by a script or off-chain worker to create and -/// submit such tasks via an extrinsic defined in `frame_system` called `do_task`. +/// Finally, the `RuntimeTask` can then be used by the off-chain worker to create and +/// submit such tasks via an extrinsic defined in `frame_system` called `do_task` which accepts +/// unsigned transaction from local source. /// -/// When submitted as unsigned transactions (for example via an off-chain workder), note -/// that the tasks will be executed in a random order. +/// When submitted as unsigned transactions, note that the tasks will be executed in a random order. 
/// /// ## Example #[doc = docify::embed!("examples/proc_main/tasks.rs", tasks_example)] diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr index 732b8edb8211d..e5704f8a548c1 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr @@ -20,3 +20,9 @@ error[E0412]: cannot find type `Runtime` in this scope | 37 | impl pallet::Config for Runtime {} | ^^^^^^^ not found in this scope + | +help: there is an enum variant `sp_api::__private::TransactionType::Runtime`; try using the variant's enum + | +37 - impl pallet::Config for Runtime {} +37 + impl pallet::Config for sp_api::__private::TransactionType {} + | diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr index a6a630cf8cb80..0508fbb8ee694 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr @@ -20,3 +20,9 @@ error[E0412]: cannot find type `Runtime` in this scope | 42 | impl pallet::Config for Runtime {} | ^^^^^^^ not found in this scope + | +help: there is an enum variant `sp_api::__private::TransactionType::Runtime`; try using the variant's enum + | +42 - impl pallet::Config for Runtime {} +42 + impl pallet::Config for sp_api::__private::TransactionType {} + | diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr index 802a966c02cf6..3340809229fd0 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr 
+++ b/substrate/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr @@ -20,3 +20,9 @@ error[E0412]: cannot find type `Runtime` in this scope | 42 | impl pallet::Config for Runtime {} | ^^^^^^^ not found in this scope + | +help: there is an enum variant `sp_api::__private::TransactionType::Runtime`; try using the variant's enum + | +42 - impl pallet::Config for Runtime {} +42 + impl pallet::Config for sp_api::__private::TransactionType {} + | diff --git a/substrate/frame/support/test/tests/tasks.rs b/substrate/frame/support/test/tests/tasks.rs index 97e58388362bb..9a61da0ec8f0a 100644 --- a/substrate/frame/support/test/tests/tasks.rs +++ b/substrate/frame/support/test/tests/tasks.rs @@ -133,3 +133,23 @@ fn tasks_work() { assert_eq!(my_pallet_2::SomeStorage::::get(), (0, 2)); }); } + +#[test] +fn do_task_unsigned_validation_rejects_external_source() { + new_test_ext().execute_with(|| { + use frame_support::pallet_prelude::{ + InvalidTransaction, TransactionSource, TransactionValidityError, ValidateUnsigned, + }; + + let task = RuntimeTask::MyPallet(my_pallet::Task::::Foo { i: 0u32, j: 2u64 }); + let call = frame_system::Call::do_task { task }; + + assert!(matches!( + System::validate_unsigned(TransactionSource::External, &call), + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + )); + + assert!(System::validate_unsigned(TransactionSource::InBlock, &call).is_ok()); + assert!(System::validate_unsigned(TransactionSource::Local, &call).is_ok()); + }); +} diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 9d80e6ec338d2..229671cb4f85e 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -1136,7 +1136,7 @@ pub mod pallet { #[pallet::validate_unsigned] impl sp_runtime::traits::ValidateUnsigned for Pallet { type Call = Call; - fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + fn validate_unsigned(source: 
TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::apply_authorized_upgrade { ref code } = call { if let Ok(res) = Self::validate_code_is_authorized(&code[..]) { if Self::can_set_code(&code, false).is_ok() { @@ -1153,17 +1153,29 @@ pub mod pallet { #[cfg(feature = "experimental")] if let Call::do_task { ref task } = call { - if task.is_valid() { - return Ok(ValidTransaction { - priority: u64::max_value(), - requires: Vec::new(), - provides: vec![T::Hashing::hash_of(&task.encode()).as_ref().to_vec()], - longevity: TransactionLongevity::max_value(), - propagate: true, - }) + // If valid, the tasks provides the tag: hash of task. + // But it is allowed to have many task for a single process, e.g. a task that takes + // a limit on the number of item to migrate is valid from 1 to the limit while + // actually advancing a single migration process. + // In the transaction pool, transaction are identified by their provides tag. + // So in order to protect the transaction pool against spam, we only accept tasks + // from local source. 
+ if source == TransactionSource::InBlock || source == TransactionSource::Local { + if task.is_valid() { + return Ok(ValidTransaction { + priority: u64::max_value(), + requires: Vec::new(), + provides: vec![T::Hashing::hash_of(&task.encode()).as_ref().to_vec()], + longevity: TransactionLongevity::max_value(), + propagate: false, + }) + } } } + #[cfg(not(feature = "experimental"))] + let _ = source; + Err(InvalidTransaction::Call.into()) } } @@ -2346,7 +2358,7 @@ impl Pallet { core::hint::black_box((new_version, current_version)); } else { if new_version.spec_name != current_version.spec_name { - return CanSetCodeResult::InvalidVersion( Error::::InvalidSpecName) + return CanSetCodeResult::InvalidVersion(Error::::InvalidSpecName) } if new_version.spec_version <= current_version.spec_version { diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index 76e26cce2653e..06b780a947077 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -412,6 +412,11 @@ fn generate_runtime_api_base_structures() -> Result { &mut std::cell::RefCell::borrow_mut(&self.changes) ); + #crate_::Extensions::commit_transaction( + &mut std::cell::RefCell::borrow_mut(&self.extensions), + #crate_::TransactionType::Host, + ); + // Will panic on an `Err` below, however we should call commit // on the recorder and the changes together. std::result::Result::and(res, std::result::Result::map_err(res2, drop)) @@ -426,6 +431,11 @@ fn generate_runtime_api_base_structures() -> Result { &mut std::cell::RefCell::borrow_mut(&self.changes) ); + #crate_::Extensions::rollback_transaction( + &mut std::cell::RefCell::borrow_mut(&self.extensions), + #crate_::TransactionType::Host, + ); + // Will panic on an `Err` below, however we should call commit // on the recorder and the changes together. 
std::result::Result::and(res, std::result::Result::map_err(res2, drop)) @@ -441,6 +451,11 @@ fn generate_runtime_api_base_structures() -> Result { if let Some(recorder) = &self.recorder { #crate_::ProofRecorder::::start_transaction(&recorder); } + + #crate_::Extensions::start_transaction( + &mut std::cell::RefCell::borrow_mut(&self.extensions), + #crate_::TransactionType::Host, + ); } } } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index 676a94f0aabac..4385d73e37b95 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -81,7 +81,7 @@ pub mod __private { mod std_imports { pub use hash_db::Hasher; pub use sp_core::traits::CallContext; - pub use sp_externalities::{Extension, Extensions}; + pub use sp_externalities::{Extension, Extensions, TransactionType}; pub use sp_runtime::StateVersion; pub use sp_state_machine::{ Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, TrieBackend, diff --git a/substrate/primitives/api/test/Cargo.toml b/substrate/primitives/api/test/Cargo.toml index 23da17c3f55ac..6582612d78ab2 100644 --- a/substrate/primitives/api/test/Cargo.toml +++ b/substrate/primitives/api/test/Cargo.toml @@ -25,6 +25,7 @@ sc-block-builder = { workspace = true, default-features = true } scale-info = { features = ["derive"], workspace = true } sp-api = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-metadata-ir = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index 0470b8b72aa04..33cb72249a826 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ 
b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -15,10 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::panic::UnwindSafe; +use std::{ + panic::UnwindSafe, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; use sc_block_builder::BlockBuilderBuilder; use sp_api::{ApiExt, Core, ProvideRuntimeApi}; +use sp_externalities::{decl_extension, TransactionType}; use sp_runtime::{ traits::{HashingFor, Header as HeaderT}, TransactionOutcome, @@ -182,14 +189,44 @@ fn disable_logging_works() { // Ensure that the type is not unwind safe! static_assertions::assert_not_impl_any!(>::Api: UnwindSafe); +#[derive(Default)] +struct TransactionTesterInner { + started: AtomicUsize, + committed: AtomicUsize, + rolled_back: AtomicUsize, +} + +decl_extension! { + struct TransactionTester(Arc); + + impl TransactionTester { + fn start_transaction(&mut self, ty: TransactionType) { + assert_eq!(ty, TransactionType::Host); + self.0.started.fetch_add(1, Ordering::Relaxed); + } + + fn commit_transaction(&mut self, ty: TransactionType) { + assert_eq!(ty, TransactionType::Host); + self.0.committed.fetch_add(1, Ordering::Relaxed); + } + + fn rollback_transaction(&mut self, ty: TransactionType) { + assert_eq!(ty, TransactionType::Host); + self.0.rolled_back.fetch_add(1, Ordering::Relaxed); + } + } +} + #[test] fn ensure_transactional_works() { const KEY: &[u8] = b"test"; let client = TestClientBuilder::new().build(); let best_hash = client.chain_info().best_hash; + let transaction_tester = Arc::new(TransactionTesterInner::default()); - let runtime_api = client.runtime_api(); + let mut runtime_api = client.runtime_api(); + runtime_api.register_extension(TransactionTester(transaction_tester.clone())); runtime_api.execute_in_transaction(|api| { api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3], false).unwrap(); @@ -207,7 +244,8 @@ fn ensure_transactional_works() { .unwrap(); 
assert_eq!(changes.main_storage_changes[0].1, Some(vec![1, 2, 3, 4])); - let runtime_api = client.runtime_api(); + let mut runtime_api = client.runtime_api(); + runtime_api.register_extension(TransactionTester(transaction_tester.clone())); runtime_api.execute_in_transaction(|api| { assert!(api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2, 3], true).is_err()); @@ -218,4 +256,21 @@ fn ensure_transactional_works() { .into_storage_changes(&client.state_at(best_hash).unwrap(), best_hash) .unwrap(); assert_eq!(changes.main_storage_changes[0].1, Some(vec![1, 2, 3])); + + let mut runtime_api = client.runtime_api(); + runtime_api.register_extension(TransactionTester(transaction_tester.clone())); + runtime_api.execute_in_transaction(|api| { + assert!(api.write_key_value(best_hash, KEY.to_vec(), vec![1, 2], true).is_err()); + + TransactionOutcome::Rollback(()) + }); + + let changes = runtime_api + .into_storage_changes(&client.state_at(best_hash).unwrap(), best_hash) + .unwrap(); + assert!(changes.main_storage_changes.is_empty()); + + assert_eq!(transaction_tester.started.load(Ordering::Relaxed), 4); + assert_eq!(transaction_tester.committed.load(Ordering::Relaxed), 3); + assert_eq!(transaction_tester.rolled_back.load(Ordering::Relaxed), 1); } diff --git a/substrate/primitives/externalities/src/extensions.rs b/substrate/primitives/externalities/src/extensions.rs index 6e7e369a676cf..48891efcf03d6 100644 --- a/substrate/primitives/externalities/src/extensions.rs +++ b/substrate/primitives/externalities/src/extensions.rs @@ -32,6 +32,27 @@ use core::{ ops::DerefMut, }; +/// Informs [`Extension`] about what type of transaction is started, committed or rolled back. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum TransactionType { + /// A transaction started by the host. + Host, + /// A transaction started by the runtime. + Runtime, +} + +impl TransactionType { + /// Is `self` set to [`Self::Host`]. 
+ pub fn is_host(self) -> bool { + matches!(self, Self::Host) + } + + /// Is `self` set to [`Self::Runtime`]. + pub fn is_runtime(self) -> bool { + matches!(self, Self::Runtime) + } +} + /// Marker trait for types that should be registered as [`Externalities`](crate::Externalities) /// extension. /// @@ -40,11 +61,26 @@ use core::{ pub trait Extension: Send + 'static { /// Return the extension as `&mut dyn Any`. /// - /// This is a trick to make the trait type castable into an `Any`. + /// This is a trick to make the trait type castable into an [`Any`]. fn as_mut_any(&mut self) -> &mut dyn Any; /// Get the [`TypeId`] of this `Extension`. fn type_id(&self) -> TypeId; + + /// Start a transaction of type `ty`. + fn start_transaction(&mut self, ty: TransactionType) { + let _ty = ty; + } + + /// Commit a transaction of type `ty`. + fn commit_transaction(&mut self, ty: TransactionType) { + let _ty = ty; + } + + /// Rollback a transaction of type `ty`. + fn rollback_transaction(&mut self, ty: TransactionType) { + let _ty = ty; + } } impl Extension for Box { @@ -55,6 +91,18 @@ impl Extension for Box { fn type_id(&self) -> TypeId { (**self).type_id() } + + fn start_transaction(&mut self, ty: TransactionType) { + (**self).start_transaction(ty); + } + + fn commit_transaction(&mut self, ty: TransactionType) { + (**self).commit_transaction(ty); + } + + fn rollback_transaction(&mut self, ty: TransactionType) { + (**self).rollback_transaction(ty); + } } /// Macro for declaring an extension that usable with [`Extensions`]. @@ -70,11 +118,37 @@ impl Extension for Box { /// struct TestExt(String); /// } /// ``` +/// +/// The [`Extension`] trait provides hooks that are called when starting, committing or rolling back +/// a transaction. These can be implemented with the macro as well: +/// ``` +/// # use sp_externalities::{decl_extension, TransactionType}; +/// decl_extension! 
{ +/// /// Some test extension +/// struct TestExtWithCallback(String); +/// +/// impl TestExtWithCallback { +/// fn start_transaction(&mut self, ty: TransactionType) { +/// // do something cool +/// } +/// +/// // The other methods `commit_transaction` and `rollback_transaction` can also +/// // be implemented in the same way. +/// } +/// } +/// ``` #[macro_export] macro_rules! decl_extension { ( $( #[ $attr:meta ] )* $vis:vis struct $ext_name:ident ($inner:ty); + $( + impl $ext_name_impl:ident { + $( + $impls:tt + )* + } + )* ) => { $( #[ $attr ] )* $vis struct $ext_name (pub $inner); @@ -87,6 +161,12 @@ macro_rules! decl_extension { fn type_id(&self) -> core::any::TypeId { core::any::Any::type_id(self) } + + $( + $( + $impls + )* + )* } impl core::ops::Deref for $ext_name { @@ -220,6 +300,21 @@ impl Extensions { pub fn merge(&mut self, other: Self) { self.extensions.extend(other.extensions); } + + /// Start a transaction of type `ty`. + pub fn start_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.start_transaction(ty)); + } + + /// Commit a transaction of type `ty`. + pub fn commit_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.commit_transaction(ty)); + } + + /// Rollback a transaction of type `ty`. 
+ pub fn rollback_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.rollback_transaction(ty)); + } } impl Extend for Extensions { diff --git a/substrate/primitives/externalities/src/lib.rs b/substrate/primitives/externalities/src/lib.rs index bcc46ee4f1b29..a543b6758ee4f 100644 --- a/substrate/primitives/externalities/src/lib.rs +++ b/substrate/primitives/externalities/src/lib.rs @@ -32,7 +32,7 @@ use core::any::{Any, TypeId}; use sp_storage::{ChildInfo, StateVersion, TrackedStorageKey}; -pub use extensions::{Extension, ExtensionStore, Extensions}; +pub use extensions::{Extension, ExtensionStore, Extensions, TransactionType}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; mod extensions; diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index 6892e94cc0a37..62f76124a9564 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -29,6 +29,8 @@ use sp_core::hexdisplay::HexDisplay; use sp_core::storage::{ well_known_keys::is_child_storage_key, ChildInfo, StateVersion, TrackedStorageKey, }; +#[cfg(feature = "std")] +use sp_externalities::TransactionType; use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults}; use crate::{trace, warn}; @@ -545,15 +547,34 @@ where } fn storage_start_transaction(&mut self) { - self.overlay.start_transaction() + self.overlay.start_transaction(); + + #[cfg(feature = "std")] + if let Some(exts) = self.extensions.as_mut() { + exts.start_transaction(TransactionType::Runtime); + } } fn storage_rollback_transaction(&mut self) -> Result<(), ()> { - self.overlay.rollback_transaction().map_err(|_| ()) + self.overlay.rollback_transaction().map_err(|_| ())?; + + #[cfg(feature = "std")] + if let Some(exts) = self.extensions.as_mut() { + exts.rollback_transaction(TransactionType::Runtime); + } + + Ok(()) } fn storage_commit_transaction(&mut 
self) -> Result<(), ()> { - self.overlay.commit_transaction().map_err(|_| ()) + self.overlay.commit_transaction().map_err(|_| ())?; + + #[cfg(feature = "std")] + if let Some(exts) = self.extensions.as_mut() { + exts.commit_transaction(TransactionType::Runtime); + } + + Ok(()) } fn wipe(&mut self) { diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index efc86a2eb3294..5157df59d8469 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -31,7 +31,7 @@ use sp_core::{ storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo, StateVersion}, }; #[cfg(feature = "std")] -use sp_externalities::{Extension, Extensions}; +use sp_externalities::{Extension, Extensions, TransactionType}; use sp_trie::{empty_child_trie_root, LayoutV1}; #[cfg(not(feature = "std"))] @@ -817,6 +817,16 @@ pub enum OverlayedExtension<'a> { Owned(Box), } +#[cfg(feature = "std")] +impl OverlayedExtension<'_> { + fn extension(&mut self) -> &mut dyn Extension { + match self { + Self::MutRef(ext) => *ext, + Self::Owned(ext) => &mut *ext, + } + } +} + /// Overlayed extensions which are sourced from [`Extensions`]. /// /// The sourced extensions will be stored as mutable references, @@ -870,6 +880,29 @@ impl<'a> OverlayedExtensions<'a> { pub fn deregister(&mut self, type_id: TypeId) -> bool { self.extensions.remove(&type_id).is_some() } + + /// Start a transaction. + /// + /// The `ty` declares the type of transaction. + pub fn start_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.extension().start_transaction(ty)); + } + + /// Commit a transaction. + /// + /// The `ty` declares the type of transaction. + pub fn commit_transaction(&mut self, ty: TransactionType) { + self.extensions.values_mut().for_each(|e| e.extension().commit_transaction(ty)); + } + + /// Rollback a transaction. 
+ /// + /// The `ty` declares the type of transaction. + pub fn rollback_transaction(&mut self, ty: TransactionType) { + self.extensions + .values_mut() + .for_each(|e| e.extension().rollback_transaction(ty)); + } } #[cfg(test)] diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index de828c2e10194..46d4f4cde41c4 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -188,6 +188,31 @@ pub trait TrieRecorderProvider { pub trait ProofSizeProvider { /// Returns the storage proof size. fn estimate_encoded_size(&self) -> usize; + + /// Start a transaction. + /// + /// `is_host` is set to `true` when the transaction was started by the host. + fn start_transaction(&mut self, is_host: bool) { + let _ = is_host; + } + + /// Rollback the last transaction. + /// + /// `is_host` is set to `true` when the transaction to rollback was started by the host. + /// + /// If there is no active transaction, the call should be ignored. + fn rollback_transaction(&mut self, is_host: bool) { + let _ = is_host; + } + + /// Commit the last transaction. + /// + /// `is_host` is set to `true` when the transaction to commit was started by the host. + /// + /// If there is no active transaction, the call should be ignored. + fn commit_transaction(&mut self, is_host: bool) { + let _ = is_host; + } } /// TrieDB error over `TrieConfiguration` trait. diff --git a/substrate/primitives/trie/src/proof_size_extension.rs b/substrate/primitives/trie/src/proof_size_extension.rs index c97f334494afd..49d3036c4add0 100644 --- a/substrate/primitives/trie/src/proof_size_extension.rs +++ b/substrate/primitives/trie/src/proof_size_extension.rs @@ -18,12 +18,29 @@ //! Externalities extension that provides access to the current proof size //! of the underlying recorder. +use parking_lot::Mutex; + use crate::ProofSizeProvider; +use std::{collections::VecDeque, sync::Arc}; sp_externalities::decl_extension! 
{ /// The proof size extension to fetch the current storage proof size /// in externalities. pub struct ProofSizeExt(Box); + + impl ProofSizeExt { + fn start_transaction(&mut self, ty: sp_externalities::TransactionType) { + self.0.start_transaction(ty.is_host()); + } + + fn rollback_transaction(&mut self, ty: sp_externalities::TransactionType) { + self.0.rollback_transaction(ty.is_host()); + } + + fn commit_transaction(&mut self, ty: sp_externalities::TransactionType) { + self.0.commit_transaction(ty.is_host()); + } + } } impl ProofSizeExt { @@ -37,3 +54,444 @@ impl ProofSizeExt { self.0.estimate_encoded_size() as _ } } + +/// Proof size estimations as recorded by [`RecordingProofSizeProvider`]. +/// +/// Each item is the estimated proof size as observed when calling +/// [`ProofSizeProvider::estimate_encoded_size`]. The items are ordered by their observation and +/// need to be replayed in the exact same order. +pub struct RecordedProofSizeEstimations(pub VecDeque); + +/// Inner structure of [`RecordingProofSizeProvider`]. +struct RecordingProofSizeProviderInner { + inner: Box, + /// Stores the observed proof estimations (in order of observation) per transaction. + /// + /// Last element of the outer vector is the active transaction. + proof_size_estimations: Vec>, +} + +/// An implementation of [`ProofSizeProvider`] that records the return value of the calls to +/// [`ProofSizeProvider::estimate_encoded_size`]. +/// +/// Wraps an inner [`ProofSizeProvider`] that is used to get the actual encoded size estimations. +/// Each estimation is recorded in the order it was observed. +#[derive(Clone)] +pub struct RecordingProofSizeProvider { + inner: Arc>, +} + +impl RecordingProofSizeProvider { + /// Creates a new instance of [`RecordingProofSizeProvider`]. + pub fn new(recorder: T) -> Self { + Self { + inner: Arc::new(Mutex::new(RecordingProofSizeProviderInner { + inner: Box::new(recorder), + // Init the always existing transaction. 
+ proof_size_estimations: vec![Vec::new()], + })), + } + } + + /// Returns the recorded estimations returned by each call to + /// [`Self::estimate_encoded_size`]. + pub fn recorded_estimations(&self) -> Vec { + self.inner.lock().proof_size_estimations.iter().flatten().copied().collect() + } +} + +impl ProofSizeProvider for RecordingProofSizeProvider { + fn estimate_encoded_size(&self) -> usize { + let mut inner = self.inner.lock(); + + let estimation = inner.inner.estimate_encoded_size(); + + inner + .proof_size_estimations + .last_mut() + .expect("There is always at least one transaction open; qed") + .push(estimation); + + estimation + } + + fn start_transaction(&mut self, is_host: bool) { + // We don't care about runtime transactions, because they are part of the consensus critical + // path, that will always deterministically call this code. + // + // For example a runtime execution is creating 10 runtime transaction and calling in every + // transaction the proof size estimation host function and 8 of these transactions are + // rolled back. We need to keep all the 10 estimations. When the runtime execution is + // replayed (by e.g. importing a block), we will deterministically again create 10 runtime + // executions and roll back 8. However, in between we require all 10 estimations as + // otherwise the execution would not be deterministically anymore. + // + // A host transaction is only rolled back while for example building a block and an + // extrinsic failed in the early checks in the runtime. In this case, the extrinsic will + // also never appear in a block and thus, will not need to be replayed later on. + if is_host { + self.inner.lock().proof_size_estimations.push(Default::default()); + } + } + + fn rollback_transaction(&mut self, is_host: bool) { + let mut inner = self.inner.lock(); + + // The host side transaction needs to be reverted, because this is only done when an + // entire execution is rolled back. 
So, the execution will never be part of the consensus + // critical path. + if is_host && inner.proof_size_estimations.len() > 1 { + inner.proof_size_estimations.pop(); + } + } + + fn commit_transaction(&mut self, is_host: bool) { + let mut inner = self.inner.lock(); + + if is_host && inner.proof_size_estimations.len() > 1 { + let last = inner + .proof_size_estimations + .pop() + .expect("There are more than one element in the vector; qed"); + + inner + .proof_size_estimations + .last_mut() + .expect("There are more than one element in the vector; qed") + .extend(last); + } + } +} + +/// An implementation of [`ProofSizeProvider`] that replays estimations recorded by +/// [`RecordingProofSizeProvider`]. +/// +/// The recorded estimations are removed as they are required by calls to +/// [`Self::estimate_encoded_size`]. Will return `0` when all estimations are consumed. +pub struct ReplayProofSizeProvider(Arc>); + +impl ReplayProofSizeProvider { + /// Creates a new instance from the given [`RecordedProofSizeEstimations`]. 
+ pub fn from_recorded(recorded: RecordedProofSizeEstimations) -> Self { + Self(Arc::new(Mutex::new(recorded))) + } +} + +impl From for ReplayProofSizeProvider { + fn from(value: RecordedProofSizeEstimations) -> Self { + Self::from_recorded(value) + } +} + +impl ProofSizeProvider for ReplayProofSizeProvider { + fn estimate_encoded_size(&self) -> usize { + self.0.lock().0.pop_front().unwrap_or_default() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::atomic::{AtomicUsize, Ordering}; + + // Mock ProofSizeProvider for testing + #[derive(Clone)] + struct MockProofSizeProvider { + size: Arc, + } + + impl MockProofSizeProvider { + fn new(initial_size: usize) -> Self { + Self { size: Arc::new(AtomicUsize::new(initial_size)) } + } + + fn set_size(&self, new_size: usize) { + self.size.store(new_size, Ordering::Relaxed); + } + } + + impl ProofSizeProvider for MockProofSizeProvider { + fn estimate_encoded_size(&self) -> usize { + self.size.load(Ordering::Relaxed) + } + + fn start_transaction(&mut self, _is_host: bool) {} + fn rollback_transaction(&mut self, _is_host: bool) {} + fn commit_transaction(&mut self, _is_host: bool) {} + } + + #[test] + fn recording_proof_size_provider_basic_functionality() { + let mock = MockProofSizeProvider::new(100); + let tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Initial state - no estimations recorded yet + assert_eq!(tracker.recorded_estimations(), Vec::::new()); + + // Call estimate_encoded_size and verify it's recorded + let size = tracker.estimate_encoded_size(); + assert_eq!(size, 100); + assert_eq!(tracker.recorded_estimations(), vec![100]); + + // Change the mock size and call again + mock.set_size(200); + let size = tracker.estimate_encoded_size(); + assert_eq!(size, 200); + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + + // Multiple calls with same size + let size = tracker.estimate_encoded_size(); + assert_eq!(size, 200); + assert_eq!(tracker.recorded_estimations(), vec![100, 
200, 200]); + } + + #[test] + fn recording_proof_size_provider_host_transactions() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Record some estimations in the initial transaction + tracker.estimate_encoded_size(); + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100, 100]); + + // Start a host transaction + tracker.start_transaction(true); + mock.set_size(200); + tracker.estimate_encoded_size(); + + // Should have 3 estimations total + assert_eq!(tracker.recorded_estimations(), vec![100, 100, 200]); + + // Commit the host transaction + tracker.commit_transaction(true); + + // All estimations should still be there + assert_eq!(tracker.recorded_estimations(), vec![100, 100, 200]); + + // Add more estimations + mock.set_size(300); + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100, 100, 200, 300]); + } + + #[test] + fn recording_proof_size_provider_host_transaction_rollback() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Record some estimations in the initial transaction + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100]); + + // Start a host transaction + tracker.start_transaction(true); + mock.set_size(200); + tracker.estimate_encoded_size(); + tracker.estimate_encoded_size(); + + // Should have 3 estimations total + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 200]); + + // Rollback the host transaction + tracker.rollback_transaction(true); + + // Should only have the original estimation + assert_eq!(tracker.recorded_estimations(), vec![100]); + } + + #[test] + fn recording_proof_size_provider_runtime_transactions_ignored() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Record initial estimation + 
tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100]); + + // Start a runtime transaction (is_host = false) + tracker.start_transaction(false); + mock.set_size(200); + tracker.estimate_encoded_size(); + + // Should have both estimations + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + + // Commit runtime transaction - should not affect recording + tracker.commit_transaction(false); + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + + // Rollback runtime transaction - should not affect recording + tracker.rollback_transaction(false); + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + } + + #[test] + fn recording_proof_size_provider_nested_host_transactions() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Initial estimation + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100]); + + // Start first host transaction + tracker.start_transaction(true); + mock.set_size(200); + tracker.estimate_encoded_size(); + + // Start nested host transaction + tracker.start_transaction(true); + mock.set_size(300); + tracker.estimate_encoded_size(); + + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 300]); + + // Commit nested transaction + tracker.commit_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 300]); + + // Commit outer transaction + tracker.commit_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 300]); + } + + #[test] + fn recording_proof_size_provider_nested_host_transaction_rollback() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Initial estimation + tracker.estimate_encoded_size(); + + // Start first host transaction + tracker.start_transaction(true); + mock.set_size(200); + tracker.estimate_encoded_size(); + + // Start nested host transaction 
+ tracker.start_transaction(true); + mock.set_size(300); + tracker.estimate_encoded_size(); + + assert_eq!(tracker.recorded_estimations(), vec![100, 200, 300]); + + // Rollback nested transaction + tracker.rollback_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100, 200]); + + // Rollback outer transaction + tracker.rollback_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100]); + } + + #[test] + fn recording_proof_size_provider_rollback_on_base_transaction_does_nothing() { + let mock = MockProofSizeProvider::new(100); + let mut tracker = RecordingProofSizeProvider::new(mock.clone()); + + // Record some estimations + tracker.estimate_encoded_size(); + tracker.estimate_encoded_size(); + assert_eq!(tracker.recorded_estimations(), vec![100, 100]); + + // Try to rollback the base transaction - should do nothing + tracker.rollback_transaction(true); + assert_eq!(tracker.recorded_estimations(), vec![100, 100]); + } + + #[test] + fn recorded_proof_size_estimations_struct() { + let estimations = vec![100, 200, 300]; + let recorded = RecordedProofSizeEstimations(estimations.into()); + let expected: VecDeque<_> = vec![100, 200, 300].into(); + assert_eq!(recorded.0, expected); + } + + #[test] + fn replay_proof_size_provider_basic_functionality() { + let estimations = vec![100, 200, 300, 150]; + let recorded = RecordedProofSizeEstimations(estimations.into()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Should replay estimations in order + assert_eq!(replay.estimate_encoded_size(), 100); + assert_eq!(replay.estimate_encoded_size(), 200); + assert_eq!(replay.estimate_encoded_size(), 300); + assert_eq!(replay.estimate_encoded_size(), 150); + } + + #[test] + fn replay_proof_size_provider_exhausted_returns_zero() { + let estimations = vec![100, 200]; + let recorded = RecordedProofSizeEstimations(estimations.into()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Consume all estimations + 
assert_eq!(replay.estimate_encoded_size(), 100); + assert_eq!(replay.estimate_encoded_size(), 200); + + // Should return 0 when exhausted + assert_eq!(replay.estimate_encoded_size(), 0); + assert_eq!(replay.estimate_encoded_size(), 0); + } + + #[test] + fn replay_proof_size_provider_empty_returns_zero() { + let recorded = RecordedProofSizeEstimations(VecDeque::new()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Should return 0 for empty estimations + assert_eq!(replay.estimate_encoded_size(), 0); + assert_eq!(replay.estimate_encoded_size(), 0); + } + + #[test] + fn replay_proof_size_provider_from_trait() { + let estimations = vec![42, 84]; + let recorded = RecordedProofSizeEstimations(estimations.into()); + let replay: ReplayProofSizeProvider = recorded.into(); + + assert_eq!(replay.estimate_encoded_size(), 42); + assert_eq!(replay.estimate_encoded_size(), 84); + assert_eq!(replay.estimate_encoded_size(), 0); + } + + #[test] + fn record_and_replay_integration() { + let mock = MockProofSizeProvider::new(100); + let recorder = RecordingProofSizeProvider::new(mock.clone()); + + // Record some estimations + recorder.estimate_encoded_size(); + mock.set_size(200); + recorder.estimate_encoded_size(); + mock.set_size(300); + recorder.estimate_encoded_size(); + + // Get recorded estimations + let recorded_estimations = recorder.recorded_estimations(); + assert_eq!(recorded_estimations, vec![100, 200, 300]); + + // Create replay provider from recorded estimations + let recorded = RecordedProofSizeEstimations(recorded_estimations.into()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Replay should return the same sequence + assert_eq!(replay.estimate_encoded_size(), 100); + assert_eq!(replay.estimate_encoded_size(), 200); + assert_eq!(replay.estimate_encoded_size(), 300); + assert_eq!(replay.estimate_encoded_size(), 0); + } + + #[test] + fn replay_proof_size_provider_single_value() { + let estimations = vec![42]; + let 
recorded = RecordedProofSizeEstimations(estimations.into()); + let replay = ReplayProofSizeProvider::from_recorded(recorded); + + // Should return the single value then default to 0 + assert_eq!(replay.estimate_encoded_size(), 42); + assert_eq!(replay.estimate_encoded_size(), 0); + } +}